drivers/scsi/ipr.c (mirror_ubuntu-zesty-kernel.git, blame as of "[SCSI] ipr: Reduce lock contention")
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

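/*
 * Illustrative usage only (not part of the original source): the module
 * parameters above are supplied at load time, for example:
 *
 *	modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * fastfail and debug are registered S_IRUGO | S_IWUSR, so they can also
 * be toggled at runtime through /sys/module/ipr/parameters/.
 */
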
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

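/*
 * Illustrative sketch (not part of the original source): the trace hook
 * above is lock-free. Instead of serializing on the host lock, each
 * caller claims a slot with atomic_add_return() and wraps the index
 * modulo IPR_NUM_TRACE_ENTRIES, e.g. for a 4-entry ring:
 *
 *	slot = atomic_add_return(1, &trace_index) % 4;	// 1, 2, 3, 0, ...
 *
 * Two concurrent callers are guaranteed distinct slots, and the wmb()
 * orders the entry's stores ahead of any later stores from the same CPU.
 */
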
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue to allocate the command from
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then OR in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

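/*
 * Illustrative note (not part of the original source): on SIS64 the
 * low-order bits of the IOARCB bus address double as a size hint, which
 * only works because the IOARCB is aligned coarsely enough that those
 * bits are otherwise always zero. For example, an IOARCB at bus address
 * 0x10000 would be written as 0x10001 (256-byte default) or as 0x10005
 * (512-byte IOARCB once the scatter list spills past 128 bytes); the
 * adapter presumably masks the hint bits back off to recover the address.
 */
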
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

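/*
 * Illustrative example (not part of the original source): for a single
 * 512-byte read buffer on a SIS32 adapter, the call
 *
 *	ipr_init_ioadl(ipr_cmd, dma_addr, 512, IPR_IOADL_FLAGS_READ_LAST);
 *
 * packs flags and length into one big-endian word (flags | 512) and fills
 * in read_ioadl_len / read_data_transfer_length; this assumes the flag
 * bits occupy the high bits of that shared word.
 */
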
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	else
		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

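/*
 * Illustrative note (not part of the original source): with hrrq_num == 4
 * the function above cycles through queues 1, 2 and 3 and never returns 0,
 * consistent with queue 0 (IPR_INIT_HRRQ) being kept for internal and
 * initialization commands:
 *
 *	(atomic counter % (4 - 1)) + 1  ->  1, 2, 3, 1, 2, 3, ...
 */
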
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}

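/*
 * Illustrative example (not part of the original source): for host number
 * 2 and a res_path of { 0x00, 0x01, 0x02, 0xff, ... }, the two helpers
 * above produce "2/00-01-02"; the 0xff byte terminates the path walk.
 */
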
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

1401
1402/**
1403 * ipr_process_ccn - Op done function for a CCN.
1404 * @ipr_cmd: ipr command struct
1405 *
1406 * This function is the op done function for a configuration
1407 * change notification host controlled async from the adapter.
1408 *
1409 * Return value:
1410 * none
1411 **/
1412static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1413{
1414 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1415 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1416 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
1417
1418 list_del(&hostrcb->queue);
05a6538a 1419 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
1420
1421 if (ioasc) {
1422 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1423 dev_err(&ioa_cfg->pdev->dev,
1424 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1425
1426 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1427 } else {
1428 ipr_handle_config_change(ioa_cfg, hostrcb);
1429 }
1430}
1431
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

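/*
 * Illustrative example (not part of the original source): called with
 * buf = "IBM     " and i = 7 (the last byte of the field), the loop backs
 * up over the trailing blanks, leaving "IBM \0" in the buffer, and the
 * function returns 4, the length of the stripped-and-padded string.
 */
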
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

1519/**
1520 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1521 * @vpd: vendor/product id/sn/wwn struct
1522 *
1523 * Return value:
1524 * none
1525 **/
1526static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1527{
1528 ipr_log_vpd(&vpd->vpd);
1529 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1530 be32_to_cpu(vpd->wwid[1]));
1531}
1532
1533/**
1534 * ipr_log_enhanced_cache_error - Log a cache error.
1535 * @ioa_cfg: ioa config struct
1536 * @hostrcb: hostrcb struct
1537 *
1538 * Return value:
1539 * none
1540 **/
1541static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1542 struct ipr_hostrcb *hostrcb)
1543{
1544 struct ipr_hostrcb_type_12_error *error;
1545
1546 if (ioa_cfg->sis64)
1547 error = &hostrcb->hcam.u.error64.u.type_12_error;
1548 else
1549 error = &hostrcb->hcam.u.error.u.type_12_error;
1550
1551 ipr_err("-----Current Configuration-----\n");
1552 ipr_err("Cache Directory Card Information:\n");
1553 ipr_log_ext_vpd(&error->ioa_vpd);
1554 ipr_err("Adapter Card Information:\n");
1555 ipr_log_ext_vpd(&error->cfc_vpd);
1556
1557 ipr_err("-----Expected Configuration-----\n");
1558 ipr_err("Cache Directory Card Information:\n");
1559 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1560 ipr_err("Adapter Card Information:\n");
1561 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1562
1563 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1564 be32_to_cpu(error->ioa_data[0]),
1565 be32_to_cpu(error->ioa_data[1]),
1566 be32_to_cpu(error->ioa_data[2]));
1567}
1568
1569/**
1570 * ipr_log_cache_error - Log a cache error.
1571 * @ioa_cfg: ioa config struct
1572 * @hostrcb: hostrcb struct
1573 *
1574 * Return value:
1575 * none
1576 **/
1577static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1578 struct ipr_hostrcb *hostrcb)
1579{
1580 struct ipr_hostrcb_type_02_error *error =
1581 &hostrcb->hcam.u.error.u.type_02_error;
1582
1583 ipr_err("-----Current Configuration-----\n");
1584 ipr_err("Cache Directory Card Information:\n");
1585	ipr_log_vpd(&error->ioa_vpd);
1586	ipr_err("Adapter Card Information:\n");
1587	ipr_log_vpd(&error->cfc_vpd);
1588
1589 ipr_err("-----Expected Configuration-----\n");
1590 ipr_err("Cache Directory Card Information:\n");
1591	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1592	ipr_err("Adapter Card Information:\n");
1593	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1594
1595 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1596 be32_to_cpu(error->ioa_data[0]),
1597 be32_to_cpu(error->ioa_data[1]),
1598 be32_to_cpu(error->ioa_data[2]));
1599}
1600
1601/**
1602 * ipr_log_enhanced_config_error - Log a configuration error.
1603 * @ioa_cfg: ioa config struct
1604 * @hostrcb: hostrcb struct
1605 *
1606 * Return value:
1607 * none
1608 **/
1609static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1610 struct ipr_hostrcb *hostrcb)
1611{
1612 int errors_logged, i;
1613 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1614 struct ipr_hostrcb_type_13_error *error;
1615
1616 error = &hostrcb->hcam.u.error.u.type_13_error;
1617 errors_logged = be32_to_cpu(error->errors_logged);
1618
1619 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1620 be32_to_cpu(error->errors_detected), errors_logged);
1621
1622 dev_entry = error->dev;
1623
1624 for (i = 0; i < errors_logged; i++, dev_entry++) {
1625 ipr_err_separator;
1626
1627 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1628 ipr_log_ext_vpd(&dev_entry->vpd);
1629
1630 ipr_err("-----New Device Information-----\n");
1631 ipr_log_ext_vpd(&dev_entry->new_vpd);
1632
1633 ipr_err("Cache Directory Card Information:\n");
1634 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1635
1636 ipr_err("Adapter Card Information:\n");
1637 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1638 }
1639}
1640
1641/**
1642 * ipr_log_sis64_config_error - Log a device error.
1643 * @ioa_cfg: ioa config struct
1644 * @hostrcb: hostrcb struct
1645 *
1646 * Return value:
1647 * none
1648 **/
1649static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1650 struct ipr_hostrcb *hostrcb)
1651{
1652 int errors_logged, i;
1653 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1654 struct ipr_hostrcb_type_23_error *error;
1655 char buffer[IPR_MAX_RES_PATH_LENGTH];
1656
1657 error = &hostrcb->hcam.u.error64.u.type_23_error;
1658 errors_logged = be32_to_cpu(error->errors_logged);
1659
1660 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1661 be32_to_cpu(error->errors_detected), errors_logged);
1662
1663 dev_entry = error->dev;
1664
1665 for (i = 0; i < errors_logged; i++, dev_entry++) {
1666 ipr_err_separator;
1667
1668 ipr_err("Device %d : %s", i + 1,
1669 __ipr_format_res_path(dev_entry->res_path,
1670 buffer, sizeof(buffer)));
1671 ipr_log_ext_vpd(&dev_entry->vpd);
1672
1673 ipr_err("-----New Device Information-----\n");
1674 ipr_log_ext_vpd(&dev_entry->new_vpd);
1675
1676 ipr_err("Cache Directory Card Information:\n");
1677 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1678
1679 ipr_err("Adapter Card Information:\n");
1680 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1681 }
1682}
1683
1684/**
1685 * ipr_log_config_error - Log a configuration error.
1686 * @ioa_cfg: ioa config struct
1687 * @hostrcb: hostrcb struct
1688 *
1689 * Return value:
1690 * none
1691 **/
1692static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1693 struct ipr_hostrcb *hostrcb)
1694{
1695 int errors_logged, i;
1696 struct ipr_hostrcb_device_data_entry *dev_entry;
1697 struct ipr_hostrcb_type_03_error *error;
1698
1699 error = &hostrcb->hcam.u.error.u.type_03_error;
1700 errors_logged = be32_to_cpu(error->errors_logged);
1701
1702 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703 be32_to_cpu(error->errors_detected), errors_logged);
1704
1705	dev_entry = error->dev;
1706
1707 for (i = 0; i < errors_logged; i++, dev_entry++) {
1708 ipr_err_separator;
1709
1710		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1711		ipr_log_vpd(&dev_entry->vpd);
1712
1713 ipr_err("-----New Device Information-----\n");
1714		ipr_log_vpd(&dev_entry->new_vpd);
1715
1716 ipr_err("Cache Directory Card Information:\n");
1717		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1718
1719 ipr_err("Adapter Card Information:\n");
1720		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1721
1722 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1723 be32_to_cpu(dev_entry->ioa_data[0]),
1724 be32_to_cpu(dev_entry->ioa_data[1]),
1725 be32_to_cpu(dev_entry->ioa_data[2]),
1726 be32_to_cpu(dev_entry->ioa_data[3]),
1727 be32_to_cpu(dev_entry->ioa_data[4]));
1728 }
1729}
1730
1731/**
1732 * ipr_log_enhanced_array_error - Log an array configuration error.
1733 * @ioa_cfg: ioa config struct
1734 * @hostrcb: hostrcb struct
1735 *
1736 * Return value:
1737 * none
1738 **/
1739static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1740 struct ipr_hostrcb *hostrcb)
1741{
1742 int i, num_entries;
1743 struct ipr_hostrcb_type_14_error *error;
1744 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1745 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1746
1747 error = &hostrcb->hcam.u.error.u.type_14_error;
1748
1749 ipr_err_separator;
1750
1751 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1752 error->protection_level,
1753 ioa_cfg->host->host_no,
1754 error->last_func_vset_res_addr.bus,
1755 error->last_func_vset_res_addr.target,
1756 error->last_func_vset_res_addr.lun);
1757
1758 ipr_err_separator;
1759
1760 array_entry = error->array_member;
1761 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1762			    ARRAY_SIZE(error->array_member));
1763
1764 for (i = 0; i < num_entries; i++, array_entry++) {
1765 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1766 continue;
1767
1768 if (be32_to_cpu(error->exposed_mode_adn) == i)
1769 ipr_err("Exposed Array Member %d:\n", i);
1770 else
1771 ipr_err("Array Member %d:\n", i);
1772
1773 ipr_log_ext_vpd(&array_entry->vpd);
1774 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1775 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1776 "Expected Location");
1777
1778 ipr_err_separator;
1779 }
1780}
1781
1782/**
1783 * ipr_log_array_error - Log an array configuration error.
1784 * @ioa_cfg: ioa config struct
1785 * @hostrcb: hostrcb struct
1786 *
1787 * Return value:
1788 * none
1789 **/
1790static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1791 struct ipr_hostrcb *hostrcb)
1792{
1793 int i;
1794 struct ipr_hostrcb_type_04_error *error;
1795 struct ipr_hostrcb_array_data_entry *array_entry;
1796 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1797
1798 error = &hostrcb->hcam.u.error.u.type_04_error;
1799
1800 ipr_err_separator;
1801
1802 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1803 error->protection_level,
1804 ioa_cfg->host->host_no,
1805 error->last_func_vset_res_addr.bus,
1806 error->last_func_vset_res_addr.target,
1807 error->last_func_vset_res_addr.lun);
1808
1809 ipr_err_separator;
1810
1811 array_entry = error->array_member;
1812
1813 for (i = 0; i < 18; i++) {
1814		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1815 continue;
1816
1817		if (be32_to_cpu(error->exposed_mode_adn) == i)
1818			ipr_err("Exposed Array Member %d:\n", i);
1819		else
1820			ipr_err("Array Member %d:\n", i);
1821
1822		ipr_log_vpd(&array_entry->vpd);
1823
1824 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1825 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1826 "Expected Location");
1827
1828 ipr_err_separator;
1829
1830 if (i == 9)
1831 array_entry = error->array_member2;
1832 else
1833 array_entry++;
1834 }
1835}
1836
1837/**
1838 * ipr_log_hex_data - Log additional hex IOA error data.
1839 * @ioa_cfg:	ioa config struct
1840 * @data: IOA error data
1841 * @len: data length
1842 *
1843 * Return value:
1844 * none
1845 **/
1846static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1847{
1848 int i;
1849
1850	if (len == 0)
1851 return;
1852
1853 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1854 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1855
1856	for (i = 0; i < len / 4; i += 4) {
1857		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1858 be32_to_cpu(data[i]),
1859 be32_to_cpu(data[i+1]),
1860 be32_to_cpu(data[i+2]),
1861 be32_to_cpu(data[i+3]));
1862 }
1863}
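/*
 * Each row of the hex dump covers four big-endian words; since i counts
 * words and advances by four per row, the printed offset i*4 is the row
 * offset in bytes, e.g. (data hypothetical):
 *   00000010: 00000001 DEADBEEF 00000000 0000FFFF
 */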
1864
1865/**
1866 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1867 * @ioa_cfg: ioa config struct
1868 * @hostrcb: hostrcb struct
1869 *
1870 * Return value:
1871 * none
1872 **/
1873static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1874 struct ipr_hostrcb *hostrcb)
1875{
1876 struct ipr_hostrcb_type_17_error *error;
1877
1878 if (ioa_cfg->sis64)
1879 error = &hostrcb->hcam.u.error64.u.type_17_error;
1880 else
1881 error = &hostrcb->hcam.u.error.u.type_17_error;
1882
1883	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1884	strim(error->failure_reason);
1885
1886 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1887 be32_to_cpu(hostrcb->hcam.u.error.prc));
1888 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1889	ipr_log_hex_data(ioa_cfg, error->data,
1890 be32_to_cpu(hostrcb->hcam.length) -
1891 (offsetof(struct ipr_hostrcb_error, u) +
1892 offsetof(struct ipr_hostrcb_type_17_error, data)));
1893}
1894
1895/**
1896 * ipr_log_dual_ioa_error - Log a dual adapter error.
1897 * @ioa_cfg: ioa config struct
1898 * @hostrcb: hostrcb struct
1899 *
1900 * Return value:
1901 * none
1902 **/
1903static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1904 struct ipr_hostrcb *hostrcb)
1905{
1906 struct ipr_hostrcb_type_07_error *error;
1907
1908 error = &hostrcb->hcam.u.error.u.type_07_error;
1909 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1910	strim(error->failure_reason);
1911
1912 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1913 be32_to_cpu(hostrcb->hcam.u.error.prc));
1914 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1915	ipr_log_hex_data(ioa_cfg, error->data,
1916 be32_to_cpu(hostrcb->hcam.length) -
1917 (offsetof(struct ipr_hostrcb_error, u) +
1918 offsetof(struct ipr_hostrcb_type_07_error, data)));
1919}
1920
1921static const struct {
1922 u8 active;
1923 char *desc;
1924} path_active_desc[] = {
1925 { IPR_PATH_NO_INFO, "Path" },
1926 { IPR_PATH_ACTIVE, "Active path" },
1927 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1928};
1929
1930static const struct {
1931 u8 state;
1932 char *desc;
1933} path_state_desc[] = {
1934 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1935 { IPR_PATH_HEALTHY, "is healthy" },
1936 { IPR_PATH_DEGRADED, "is degraded" },
1937 { IPR_PATH_FAILED, "is failed" }
1938};
1939
1940/**
1941 * ipr_log_fabric_path - Log a fabric path error
1942 * @hostrcb: hostrcb struct
1943 * @fabric: fabric descriptor
1944 *
1945 * Return value:
1946 * none
1947 **/
1948static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1949 struct ipr_hostrcb_fabric_desc *fabric)
1950{
1951 int i, j;
1952 u8 path_state = fabric->path_state;
1953 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1954 u8 state = path_state & IPR_PATH_STATE_MASK;
1955
1956 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1957 if (path_active_desc[i].active != active)
1958 continue;
1959
1960 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1961 if (path_state_desc[j].state != state)
1962 continue;
1963
1964 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1965 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1966 path_active_desc[i].desc, path_state_desc[j].desc,
1967 fabric->ioa_port);
1968 } else if (fabric->cascaded_expander == 0xff) {
1969 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1970 path_active_desc[i].desc, path_state_desc[j].desc,
1971 fabric->ioa_port, fabric->phy);
1972 } else if (fabric->phy == 0xff) {
1973 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1974 path_active_desc[i].desc, path_state_desc[j].desc,
1975 fabric->ioa_port, fabric->cascaded_expander);
1976 } else {
1977 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1978 path_active_desc[i].desc, path_state_desc[j].desc,
1979 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1980 }
1981 return;
1982 }
1983 }
1984
1985 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1986 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1987}
1988
1989/**
1990 * ipr_log64_fabric_path - Log a fabric path error
1991 * @hostrcb: hostrcb struct
1992 * @fabric: fabric descriptor
1993 *
1994 * Return value:
1995 * none
1996 **/
1997static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1998 struct ipr_hostrcb64_fabric_desc *fabric)
1999{
2000 int i, j;
2001 u8 path_state = fabric->path_state;
2002 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2003 u8 state = path_state & IPR_PATH_STATE_MASK;
2004 char buffer[IPR_MAX_RES_PATH_LENGTH];
2005
2006 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2007 if (path_active_desc[i].active != active)
2008 continue;
2009
2010 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2011 if (path_state_desc[j].state != state)
2012 continue;
2013
2014 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2015 path_active_desc[i].desc, path_state_desc[j].desc,
2016 ipr_format_res_path(hostrcb->ioa_cfg,
2017 fabric->res_path,
2018 buffer, sizeof(buffer)));
2019 return;
2020 }
2021 }
2022
2023 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2024 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2025 buffer, sizeof(buffer)));
2026}
2027
2028static const struct {
2029 u8 type;
2030 char *desc;
2031} path_type_desc[] = {
2032 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2033 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2034 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2035 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2036};
2037
2038static const struct {
2039 u8 status;
2040 char *desc;
2041} path_status_desc[] = {
2042 { IPR_PATH_CFG_NO_PROB, "Functional" },
2043 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2044 { IPR_PATH_CFG_FAILED, "Failed" },
2045 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2046 { IPR_PATH_NOT_DETECTED, "Missing" },
2047 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2048};
2049
2050static const char *link_rate[] = {
2051 "unknown",
2052 "disabled",
2053 "phy reset problem",
2054 "spinup hold",
2055 "port selector",
2056 "unknown",
2057 "unknown",
2058 "unknown",
2059 "1.5Gbps",
2060 "3.0Gbps",
2061 "unknown",
2062 "unknown",
2063 "unknown",
2064 "unknown",
2065 "unknown",
2066 "unknown"
2067};
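/*
 * link_rate[] is indexed by the 4-bit link rate code of a path element
 * (cfg->link_rate & IPR_PHY_LINK_RATE_MASK); the encoding mirrors the
 * SAS negotiated link rate values, so 0x8 and 0x9 decode to 1.5Gbps
 * and 3.0Gbps while the reserved codes all print as "unknown".
 */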
2068
2069/**
2070 * ipr_log_path_elem - Log a fabric path element.
2071 * @hostrcb: hostrcb struct
2072 * @cfg: fabric path element struct
2073 *
2074 * Return value:
2075 * none
2076 **/
2077static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2078 struct ipr_hostrcb_config_element *cfg)
2079{
2080 int i, j;
2081 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2082 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2083
2084 if (type == IPR_PATH_CFG_NOT_EXIST)
2085 return;
2086
2087 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2088 if (path_type_desc[i].type != type)
2089 continue;
2090
2091 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2092 if (path_status_desc[j].status != status)
2093 continue;
2094
2095 if (type == IPR_PATH_CFG_IOA_PORT) {
2096 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2097 path_status_desc[j].desc, path_type_desc[i].desc,
2098 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2099 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2100 } else {
2101 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2102 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2103 path_status_desc[j].desc, path_type_desc[i].desc,
2104 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2105 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2106 } else if (cfg->cascaded_expander == 0xff) {
2107 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2108 "WWN=%08X%08X\n", path_status_desc[j].desc,
2109 path_type_desc[i].desc, cfg->phy,
2110 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2111 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2112 } else if (cfg->phy == 0xff) {
2113 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2114 "WWN=%08X%08X\n", path_status_desc[j].desc,
2115 path_type_desc[i].desc, cfg->cascaded_expander,
2116 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2117 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2118 } else {
2119 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2120 "WWN=%08X%08X\n", path_status_desc[j].desc,
2121 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2122 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2123 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2124 }
2125 }
2126 return;
2127 }
2128 }
2129
2130 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2131 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2132 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2133 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2134}
2135
2136/**
2137 * ipr_log64_path_elem - Log a fabric path element.
2138 * @hostrcb: hostrcb struct
2139 * @cfg: fabric path element struct
2140 *
2141 * Return value:
2142 * none
2143 **/
2144static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2145 struct ipr_hostrcb64_config_element *cfg)
2146{
2147 int i, j;
2148 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2149 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2150 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2151 char buffer[IPR_MAX_RES_PATH_LENGTH];
2152
2153 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2154 return;
2155
2156 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2157 if (path_type_desc[i].type != type)
2158 continue;
2159
2160 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2161 if (path_status_desc[j].status != status)
2162 continue;
2163
2164 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2165 path_status_desc[j].desc, path_type_desc[i].desc,
2166 ipr_format_res_path(hostrcb->ioa_cfg,
2167 cfg->res_path, buffer, sizeof(buffer)),
2168 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2169 be32_to_cpu(cfg->wwid[0]),
2170 be32_to_cpu(cfg->wwid[1]));
2171 return;
2172 }
2173 }
2174 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2175 "WWN=%08X%08X\n", cfg->type_status,
2176 ipr_format_res_path(hostrcb->ioa_cfg,
2177 cfg->res_path, buffer, sizeof(buffer)),
2178 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2179 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2180}
2181
2182/**
2183 * ipr_log_fabric_error - Log a fabric error.
2184 * @ioa_cfg: ioa config struct
2185 * @hostrcb: hostrcb struct
2186 *
2187 * Return value:
2188 * none
2189 **/
2190static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2191 struct ipr_hostrcb *hostrcb)
2192{
2193 struct ipr_hostrcb_type_20_error *error;
2194 struct ipr_hostrcb_fabric_desc *fabric;
2195 struct ipr_hostrcb_config_element *cfg;
2196 int i, add_len;
2197
2198 error = &hostrcb->hcam.u.error.u.type_20_error;
2199 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2200 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2201
2202 add_len = be32_to_cpu(hostrcb->hcam.length) -
2203 (offsetof(struct ipr_hostrcb_error, u) +
2204 offsetof(struct ipr_hostrcb_type_20_error, desc));
2205
2206 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2207 ipr_log_fabric_path(hostrcb, fabric);
2208 for_each_fabric_cfg(fabric, cfg)
2209 ipr_log_path_elem(hostrcb, cfg);
2210
2211 add_len -= be16_to_cpu(fabric->length);
2212 fabric = (struct ipr_hostrcb_fabric_desc *)
2213 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2214 }
2215
2216	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2217}
2218
2219/**
2220 * ipr_log_sis64_array_error - Log a sis64 array error.
2221 * @ioa_cfg: ioa config struct
2222 * @hostrcb: hostrcb struct
2223 *
2224 * Return value:
2225 * none
2226 **/
2227static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2228 struct ipr_hostrcb *hostrcb)
2229{
2230 int i, num_entries;
2231 struct ipr_hostrcb_type_24_error *error;
2232 struct ipr_hostrcb64_array_data_entry *array_entry;
2233 char buffer[IPR_MAX_RES_PATH_LENGTH];
2234 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2235
2236 error = &hostrcb->hcam.u.error64.u.type_24_error;
2237
2238 ipr_err_separator;
2239
2240 ipr_err("RAID %s Array Configuration: %s\n",
2241 error->protection_level,
2242 ipr_format_res_path(ioa_cfg, error->last_res_path,
2243 buffer, sizeof(buffer)));
2244
2245 ipr_err_separator;
2246
2247 array_entry = error->array_member;
2248 num_entries = min_t(u32, error->num_entries,
2249 ARRAY_SIZE(error->array_member));
2250
2251 for (i = 0; i < num_entries; i++, array_entry++) {
2252
2253 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2254 continue;
2255
2256 if (error->exposed_mode_adn == i)
2257 ipr_err("Exposed Array Member %d:\n", i);
2258 else
2259 ipr_err("Array Member %d:\n", i);
2260
2262 ipr_log_ext_vpd(&array_entry->vpd);
2263		ipr_err("Current Location: %s\n",
2264 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2265 buffer, sizeof(buffer)));
2266		ipr_err("Expected Location: %s\n",
2267 ipr_format_res_path(ioa_cfg,
2268 array_entry->expected_res_path,
2269 buffer, sizeof(buffer)));
2270
2271 ipr_err_separator;
2272 }
2273}
2274
2275/**
2276 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2277 * @ioa_cfg: ioa config struct
2278 * @hostrcb: hostrcb struct
2279 *
2280 * Return value:
2281 * none
2282 **/
2283static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2284 struct ipr_hostrcb *hostrcb)
2285{
2286 struct ipr_hostrcb_type_30_error *error;
2287 struct ipr_hostrcb64_fabric_desc *fabric;
2288 struct ipr_hostrcb64_config_element *cfg;
2289 int i, add_len;
2290
2291 error = &hostrcb->hcam.u.error64.u.type_30_error;
2292
2293 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2294 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2295
2296 add_len = be32_to_cpu(hostrcb->hcam.length) -
2297 (offsetof(struct ipr_hostrcb64_error, u) +
2298 offsetof(struct ipr_hostrcb_type_30_error, desc));
2299
2300 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2301 ipr_log64_fabric_path(hostrcb, fabric);
2302 for_each_fabric_cfg(fabric, cfg)
2303 ipr_log64_path_elem(hostrcb, cfg);
2304
2305 add_len -= be16_to_cpu(fabric->length);
2306 fabric = (struct ipr_hostrcb64_fabric_desc *)
2307 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2308 }
2309
2310 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2311}
2312
2313/**
2314 * ipr_log_generic_error - Log an adapter error.
2315 * @ioa_cfg: ioa config struct
2316 * @hostrcb: hostrcb struct
2317 *
2318 * Return value:
2319 * none
2320 **/
2321static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2322 struct ipr_hostrcb *hostrcb)
2323{
2324	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2325 be32_to_cpu(hostrcb->hcam.length));
2326}
2327
2328/**
2329 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2330 * @ioasc:	IOASC
2331 *
2332 * This function will return the index into the ipr_error_table
2333 * for the specified IOASC. If the IOASC is not in the table,
2334 * 0 will be returned, which points to the entry used for unknown errors.
2335 *
2336 * Return value:
2337 * index into the ipr_error_table
2338 **/
2339static u32 ipr_get_error(u32 ioasc)
2340{
2341 int i;
2342
2343 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2344		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2345 return i;
2346
2347 return 0;
2348}
2349
2350/**
2351 * ipr_handle_log_data - Log an adapter error.
2352 * @ioa_cfg: ioa config struct
2353 * @hostrcb: hostrcb struct
2354 *
2355 * This function logs an adapter error to the system.
2356 *
2357 * Return value:
2358 * none
2359 **/
2360static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2361 struct ipr_hostrcb *hostrcb)
2362{
2363 u32 ioasc;
2364 int error_index;
2365
2366 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2367 return;
2368
2369 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2370 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2371
2372 if (ioa_cfg->sis64)
2373 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2374 else
2375 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2376
2377 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2378 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2379 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2380 scsi_report_bus_reset(ioa_cfg->host,
2381				      hostrcb->hcam.u.error.fd_res_addr.bus);
2382 }
2383
2384 error_index = ipr_get_error(ioasc);
2385
2386 if (!ipr_error_table[error_index].log_hcam)
2387 return;
2388
2389	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2390
2391 /* Set indication we have logged an error */
2392 ioa_cfg->errors_logged++;
2393
2394	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2395		return;
2396 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2397 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2398
2399 switch (hostrcb->hcam.overlay_id) {
2400 case IPR_HOST_RCB_OVERLAY_ID_2:
2401 ipr_log_cache_error(ioa_cfg, hostrcb);
2402 break;
2403 case IPR_HOST_RCB_OVERLAY_ID_3:
2404 ipr_log_config_error(ioa_cfg, hostrcb);
2405 break;
2406 case IPR_HOST_RCB_OVERLAY_ID_4:
2407 case IPR_HOST_RCB_OVERLAY_ID_6:
2408 ipr_log_array_error(ioa_cfg, hostrcb);
2409 break;
2410 case IPR_HOST_RCB_OVERLAY_ID_7:
2411 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2412 break;
2413 case IPR_HOST_RCB_OVERLAY_ID_12:
2414 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2415 break;
2416 case IPR_HOST_RCB_OVERLAY_ID_13:
2417 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2418 break;
2419 case IPR_HOST_RCB_OVERLAY_ID_14:
2420 case IPR_HOST_RCB_OVERLAY_ID_16:
2421 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2422 break;
2423 case IPR_HOST_RCB_OVERLAY_ID_17:
2424 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2425 break;
2426 case IPR_HOST_RCB_OVERLAY_ID_20:
2427 ipr_log_fabric_error(ioa_cfg, hostrcb);
2428 break;
2429 case IPR_HOST_RCB_OVERLAY_ID_23:
2430 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2431 break;
2432 case IPR_HOST_RCB_OVERLAY_ID_24:
2433 case IPR_HOST_RCB_OVERLAY_ID_26:
2434 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2435 break;
2436 case IPR_HOST_RCB_OVERLAY_ID_30:
2437 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2438 break;
2439	case IPR_HOST_RCB_OVERLAY_ID_1:
2440	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2441	default:
2442		ipr_log_generic_error(ioa_cfg, hostrcb);
2443 break;
2444 }
2445}
2446
2447/**
2448 * ipr_process_error - Op done function for an adapter error log.
2449 * @ipr_cmd: ipr command struct
2450 *
2451 * This function is the op done function for an error log host
2452 * controlled async message (HCAM) from the adapter. It will log the error and
2453 * send the HCAM back to the adapter.
2454 *
2455 * Return value:
2456 * none
2457 **/
2458static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2459{
2460 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2461 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2462	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2463 u32 fd_ioasc;
2464
2465 if (ioa_cfg->sis64)
2466 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2467 else
2468 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2469
2470 list_del(&hostrcb->queue);
2471	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2472
2473 if (!ioasc) {
2474 ipr_handle_log_data(ioa_cfg, hostrcb);
2475 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2476 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2477 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2478 dev_err(&ioa_cfg->pdev->dev,
2479 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2480 }
2481
2482 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2483}
2484
2485/**
2486 * ipr_timeout - An internally generated op has timed out.
2487 * @ipr_cmd: ipr command struct
2488 *
2489 * This function blocks host requests and initiates an
2490 * adapter reset.
2491 *
2492 * Return value:
2493 * none
2494 **/
2495static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2496{
2497 unsigned long lock_flags = 0;
2498 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2499
2500 ENTER;
2501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2502
2503 ioa_cfg->errors_logged++;
2504 dev_err(&ioa_cfg->pdev->dev,
2505 "Adapter being reset due to command timeout.\n");
2506
2507 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2508 ioa_cfg->sdt_state = GET_DUMP;
2509
2510 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2511 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2512
2513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2514 LEAVE;
2515}
2516
2517/**
2518 * ipr_oper_timeout - Adapter timed out transitioning to operational
2519 * @ipr_cmd: ipr command struct
2520 *
2521 * This function blocks host requests and initiates an
2522 * adapter reset.
2523 *
2524 * Return value:
2525 * none
2526 **/
2527static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2528{
2529 unsigned long lock_flags = 0;
2530 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2531
2532 ENTER;
2533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2534
2535 ioa_cfg->errors_logged++;
2536 dev_err(&ioa_cfg->pdev->dev,
2537 "Adapter timed out transitioning to operational.\n");
2538
2539 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2540 ioa_cfg->sdt_state = GET_DUMP;
2541
2542 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2543 if (ipr_fastfail)
2544 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2545 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2546 }
2547
2548 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2549 LEAVE;
2550}
2551
2552/**
2553 * ipr_reset_reload - Reset/Reload the IOA
2554 * @ioa_cfg: ioa config struct
2555 * @shutdown_type: shutdown type
2556 *
2557 * This function resets the adapter and re-initializes it.
2558 * This function assumes that all new host commands have been stopped.
2559 * Return value:
2560 * SUCCESS / FAILED
2561 **/
2562static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2563 enum ipr_shutdown_type shutdown_type)
2564{
2565 if (!ioa_cfg->in_reset_reload)
2566 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2567
2568 spin_unlock_irq(ioa_cfg->host->host_lock);
2569 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2570 spin_lock_irq(ioa_cfg->host->host_lock);
2571
2572	/* If we got hit with a host reset while we were already resetting
2573	 the adapter for some reason, and the reset failed, report failure. */
2574	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
2575 ipr_trace;
2576 return FAILED;
2577 }
2578
2579 return SUCCESS;
2580}
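/*
 * Note: callers hold the host lock; ipr_reset_reload() drops it around
 * the wait_event() so the reset job can run, then reacquires it before
 * checking whether the adapter came back alive.
 */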
2581
2582/**
2583 * ipr_find_ses_entry - Find matching SES in SES table
2584 * @res: resource entry struct of SES
2585 *
2586 * Return value:
2587 * pointer to SES table entry / NULL on failure
2588 **/
2589static const struct ipr_ses_table_entry *
2590ipr_find_ses_entry(struct ipr_resource_entry *res)
2591{
2592 int i, j, matches;
2593	struct ipr_std_inq_vpids *vpids;
2594 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2595
2596 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2597 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2598 if (ste->compare_product_id_byte[j] == 'X') {
2599 vpids = &res->std_inq_data.vpids;
2600 if (vpids->product_id[j] == ste->product_id[j])
2601 matches++;
2602 else
2603 break;
2604 } else
2605 matches++;
2606 }
2607
2608 if (matches == IPR_PROD_ID_LEN)
2609 return ste;
2610 }
2611
2612 return NULL;
2613}
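/*
 * Matching rule, as implemented above: a position marked 'X' in
 * ste->compare_product_id_byte[] must match the device's inquiry
 * product id byte-for-byte, while every other position is a wildcard;
 * the first table entry for which all IPR_PROD_ID_LEN positions match
 * wins.
 */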
2614
2615/**
2616 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2617 * @ioa_cfg: ioa config struct
2618 * @bus: SCSI bus
2619 * @bus_width: bus width
2620 *
2621 * Return value:
2622 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2623 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2624 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2625 * max 160MHz = max 320MB/sec).
2626 **/
2627static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2628{
2629 struct ipr_resource_entry *res;
2630 const struct ipr_ses_table_entry *ste;
2631 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2632
2633 /* Loop through each config table entry in the config table buffer */
2634 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2635		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2636 continue;
2637
2638		if (bus != res->bus)
2639 continue;
2640
2641 if (!(ste = ipr_find_ses_entry(res)))
2642 continue;
2643
2644 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2645 }
2646
2647 return max_xfer_rate;
2648}
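/*
 * Worked example (limit hypothetical): for a 16-bit wide bus and an
 * SES entry with max_bus_speed_limit == 80 MB/sec, the result is
 * (80 * 10) / (16 / 8) == 400, i.e. 40 MHz in the 100KHz units
 * described above.
 */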
2649
2650/**
2651 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2652 * @ioa_cfg: ioa config struct
2653 * @max_delay: max delay in micro-seconds to wait
2654 *
2655 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2656 *
2657 * Return value:
2658 * 0 on success / other on failure
2659 **/
2660static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2661{
2662 volatile u32 pcii_reg;
2663 int delay = 1;
2664
2665 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2666 while (delay < max_delay) {
2667 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2668
2669 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2670 return 0;
2671
2672 /* udelay cannot be used if delay is more than a few milliseconds */
2673 if ((delay / 1000) > MAX_UDELAY_MS)
2674 mdelay(delay / 1000);
2675 else
2676 udelay(delay);
2677
2678 delay += delay;
2679 }
2680 return -EIO;
2681}
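/*
 * The poll interval doubles each iteration (1, 2, 4, ... usecs), so
 * the total busy-wait is bounded by roughly twice max_delay; mdelay()
 * takes over once a single step would exceed MAX_UDELAY_MS.
 */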
2682
2683/**
2684 * ipr_get_sis64_dump_data_section - Dump IOA memory
2685 * @ioa_cfg: ioa config struct
2686 * @start_addr: adapter address to dump
2687 * @dest: destination kernel buffer
2688 * @length_in_words: length to dump in 4 byte words
2689 *
2690 * Return value:
2691 * 0 on success
2692 **/
2693static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2694 u32 start_addr,
2695 __be32 *dest, u32 length_in_words)
2696{
2697 int i;
2698
2699 for (i = 0; i < length_in_words; i++) {
2700 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2701 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2702 dest++;
2703 }
2704
2705 return 0;
2706}
2707
2708/**
2709 * ipr_get_ldump_data_section - Dump IOA memory
2710 * @ioa_cfg: ioa config struct
2711 * @start_addr: adapter address to dump
2712 * @dest: destination kernel buffer
2713 * @length_in_words: length to dump in 4 byte words
2714 *
2715 * Return value:
2716 * 0 on success / -EIO on failure
2717 **/
2718static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2719 u32 start_addr,
2720 __be32 *dest, u32 length_in_words)
2721{
2722 volatile u32 temp_pcii_reg;
2723 int i, delay = 0;
2724
2725 if (ioa_cfg->sis64)
2726 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2727 dest, length_in_words);
2728
2729 /* Write IOA interrupt reg starting LDUMP state */
2730 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2731	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2732
2733 /* Wait for IO debug acknowledge */
2734 if (ipr_wait_iodbg_ack(ioa_cfg,
2735 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2736 dev_err(&ioa_cfg->pdev->dev,
2737 "IOA dump long data transfer timeout\n");
2738 return -EIO;
2739 }
2740
2741 /* Signal LDUMP interlocked - clear IO debug ack */
2742 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2743 ioa_cfg->regs.clr_interrupt_reg);
2744
2745 /* Write Mailbox with starting address */
2746 writel(start_addr, ioa_cfg->ioa_mailbox);
2747
2748 /* Signal address valid - clear IOA Reset alert */
2749 writel(IPR_UPROCI_RESET_ALERT,
2750	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2751
2752 for (i = 0; i < length_in_words; i++) {
2753 /* Wait for IO debug acknowledge */
2754 if (ipr_wait_iodbg_ack(ioa_cfg,
2755 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2756 dev_err(&ioa_cfg->pdev->dev,
2757 "IOA dump short data transfer timeout\n");
2758 return -EIO;
2759 }
2760
2761 /* Read data from mailbox and increment destination pointer */
2762 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2763 dest++;
2764
2765 /* For all but the last word of data, signal data received */
2766 if (i < (length_in_words - 1)) {
2767 /* Signal dump data received - Clear IO debug Ack */
2768 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2769 ioa_cfg->regs.clr_interrupt_reg);
2770 }
2771 }
2772
2773 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2774 writel(IPR_UPROCI_RESET_ALERT,
2775	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2776
2777 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2778	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2779
2780 /* Signal dump data received - Clear IO debug Ack */
2781 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2782 ioa_cfg->regs.clr_interrupt_reg);
2783
2784 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2785 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2786 temp_pcii_reg =
2787			readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2788
2789 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2790 return 0;
2791
2792 udelay(10);
2793 delay += 10;
2794 }
2795
2796 return 0;
2797}
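/*
 * Summary of the handshake above for the non-sis64 case: raise reset
 * alert + IO debug alert, wait for the IOA's debug acknowledge, write
 * the start address to the mailbox, read one word per acknowledge
 * cycle from the mailbox, then signal end of block and poll for the
 * IOA to drop the reset alert.
 */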
2798
2799#ifdef CONFIG_SCSI_IPR_DUMP
2800/**
2801 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2802 * @ioa_cfg: ioa config struct
2803 * @pci_address: adapter address
2804 * @length: length of data to copy
2805 *
2806 * Copy data from PCI adapter to kernel buffer.
2807 * Note: length MUST be a 4 byte multiple
2808 * Return value:
2809 * 0 on success / other on failure
2810 **/
2811static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2812 unsigned long pci_address, u32 length)
2813{
2814 int bytes_copied = 0;
2815	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2816 __be32 *page;
2817 unsigned long lock_flags = 0;
2818 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2819
2820 if (ioa_cfg->sis64)
2821 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2822 else
2823 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2824
2825	while (bytes_copied < length &&
2826	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2827 if (ioa_dump->page_offset >= PAGE_SIZE ||
2828 ioa_dump->page_offset == 0) {
2829 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2830
2831 if (!page) {
2832 ipr_trace;
2833 return bytes_copied;
2834 }
2835
2836 ioa_dump->page_offset = 0;
2837 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2838 ioa_dump->next_page_index++;
2839 } else
2840 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2841
2842 rem_len = length - bytes_copied;
2843 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2844 cur_len = min(rem_len, rem_page_len);
2845
2846 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2847 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2848 rc = -EIO;
2849 } else {
2850 rc = ipr_get_ldump_data_section(ioa_cfg,
2851 pci_address + bytes_copied,
2852 &page[ioa_dump->page_offset / 4],
2853 (cur_len / sizeof(u32)));
2854 }
2855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2856
2857 if (!rc) {
2858 ioa_dump->page_offset += cur_len;
2859 bytes_copied += cur_len;
2860 } else {
2861 ipr_trace;
2862 break;
2863 }
2864 schedule();
2865 }
2866
2867 return bytes_copied;
2868}
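/*
 * Note: dump pages are allocated GFP_ATOMIC and the host lock is held
 * only around each ipr_get_ldump_data_section() call, with a
 * schedule() between sections so a large dump does not monopolize the
 * CPU.
 */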
2869
2870/**
2871 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2872 * @hdr: dump entry header struct
2873 *
2874 * Return value:
2875 * nothing
2876 **/
2877static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2878{
2879 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2880 hdr->num_elems = 1;
2881 hdr->offset = sizeof(*hdr);
2882 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2883}
2884
2885/**
2886 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2887 * @ioa_cfg: ioa config struct
2888 * @driver_dump: driver dump struct
2889 *
2890 * Return value:
2891 * nothing
2892 **/
2893static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2894 struct ipr_driver_dump *driver_dump)
2895{
2896 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2897
2898 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2899 driver_dump->ioa_type_entry.hdr.len =
2900 sizeof(struct ipr_dump_ioa_type_entry) -
2901 sizeof(struct ipr_dump_entry_header);
2902 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2903 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2904 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2905 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2906 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2907 ucode_vpd->minor_release[1];
2908 driver_dump->hdr.num_entries++;
2909}
2910
2911/**
2912 * ipr_dump_version_data - Fill in the driver version in the dump.
2913 * @ioa_cfg: ioa config struct
2914 * @driver_dump: driver dump struct
2915 *
2916 * Return value:
2917 * nothing
2918 **/
2919static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2920 struct ipr_driver_dump *driver_dump)
2921{
2922 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2923 driver_dump->version_entry.hdr.len =
2924 sizeof(struct ipr_dump_version_entry) -
2925 sizeof(struct ipr_dump_entry_header);
2926 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2927 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2928 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2929 driver_dump->hdr.num_entries++;
2930}
2931
2932/**
2933 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2934 * @ioa_cfg: ioa config struct
2935 * @driver_dump: driver dump struct
2936 *
2937 * Return value:
2938 * nothing
2939 **/
2940static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2941 struct ipr_driver_dump *driver_dump)
2942{
2943 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2944 driver_dump->trace_entry.hdr.len =
2945 sizeof(struct ipr_dump_trace_entry) -
2946 sizeof(struct ipr_dump_entry_header);
2947 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2948 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2949 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2950 driver_dump->hdr.num_entries++;
2951}
2952
2953/**
2954 * ipr_dump_location_data - Fill in the IOA location in the dump.
2955 * @ioa_cfg: ioa config struct
2956 * @driver_dump: driver dump struct
2957 *
2958 * Return value:
2959 * nothing
2960 **/
2961static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2962 struct ipr_driver_dump *driver_dump)
2963{
2964 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2965 driver_dump->location_entry.hdr.len =
2966 sizeof(struct ipr_dump_location_entry) -
2967 sizeof(struct ipr_dump_entry_header);
2968 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2969 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2970	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2971 driver_dump->hdr.num_entries++;
2972}
2973
2974/**
2975 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2976 * @ioa_cfg: ioa config struct
2977 * @dump: dump struct
2978 *
2979 * Return value:
2980 * nothing
2981 **/
2982static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2983{
2984 unsigned long start_addr, sdt_word;
2985 unsigned long lock_flags = 0;
2986 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2987 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2988 u32 num_entries, max_num_entries, start_off, end_off;
2989 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2990	struct ipr_sdt *sdt;
2991	int valid = 1;
2992 int i;
2993
2994 ENTER;
2995
2996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2997
2998	if (ioa_cfg->sdt_state != READ_DUMP) {
2999 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3000 return;
3001 }
3002
3003 if (ioa_cfg->sis64) {
3004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3005 ssleep(IPR_DUMP_DELAY_SECONDS);
3006 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3007 }
3008
3009 start_addr = readl(ioa_cfg->ioa_mailbox);
3010
3011	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3012 dev_err(&ioa_cfg->pdev->dev,
3013 "Invalid dump table format: %lx\n", start_addr);
3014 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3015 return;
3016 }
3017
3018 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3019
3020 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3021
3022 /* Initialize the overall dump header */
3023 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3024 driver_dump->hdr.num_entries = 1;
3025 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3026 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3027 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3028 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3029
3030 ipr_dump_version_data(ioa_cfg, driver_dump);
3031 ipr_dump_location_data(ioa_cfg, driver_dump);
3032 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3033 ipr_dump_trace_data(ioa_cfg, driver_dump);
3034
3035 /* Update dump_header */
3036 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3037
3038 /* IOA Dump entry */
3039 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3040 ioa_dump->hdr.len = 0;
3041 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3042 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3043
3044 /* First entries in sdt are actually a list of dump addresses and
3045 lengths to gather the real dump data. sdt represents the pointer
3046 to the ioa generated dump table. Dump data will be extracted based
3047 on entries in this table */
3048 sdt = &ioa_dump->sdt;
3049
3050 if (ioa_cfg->sis64) {
3051 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3052 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3053 } else {
3054 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3055 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3056 }
3057
3058 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3059 (max_num_entries * sizeof(struct ipr_sdt_entry));
3060	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3061					bytes_to_copy / sizeof(__be32));
3062
3063 /* Smart Dump table is ready to use and the first entry is valid */
3064 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3065 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3066 dev_err(&ioa_cfg->pdev->dev,
3067 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3068 rc, be32_to_cpu(sdt->hdr.state));
3069 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3070 ioa_cfg->sdt_state = DUMP_OBTAINED;
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 return;
3073 }
3074
3075 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3076
3077 if (num_entries > max_num_entries)
3078 num_entries = max_num_entries;
3079
3080 /* Update dump length to the actual data to be copied */
3081 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3082 if (ioa_cfg->sis64)
3083 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3084 else
3085 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3086
3087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3088
3089 for (i = 0; i < num_entries; i++) {
3090		if (ioa_dump->hdr.len > max_dump_size) {
3091 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3092 break;
3093 }
3094
3095 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3096 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3097 if (ioa_cfg->sis64)
3098 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3099 else {
3100 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3101 end_off = be32_to_cpu(sdt->entry[i].end_token);
3102
3103 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3104 bytes_to_copy = end_off - start_off;
3105 else
3106 valid = 0;
3107 }
3108 if (valid) {
3109				if (bytes_to_copy > max_dump_size) {
3110 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3111 continue;
3112 }
3113
3114 /* Copy data from adapter to driver buffers */
3115 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3116 bytes_to_copy);
3117
3118 ioa_dump->hdr.len += bytes_copied;
3119
3120 if (bytes_copied != bytes_to_copy) {
3121 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3122 break;
3123 }
3124 }
3125 }
3126 }
3127
3128 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3129
3130 /* Update dump_header */
3131 driver_dump->hdr.len += ioa_dump->hdr.len;
3132 wmb();
3133 ioa_cfg->sdt_state = DUMP_OBTAINED;
3134 LEAVE;
3135}
3136
3137#else
3138#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3139#endif
3140
3141/**
3142 * ipr_release_dump - Free adapter dump memory
3143 * @kref: kref struct
3144 *
3145 * Return value:
3146 * nothing
3147 **/
3148static void ipr_release_dump(struct kref *kref)
3149{
3150	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3151 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3152 unsigned long lock_flags = 0;
3153 int i;
3154
3155 ENTER;
3156 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3157 ioa_cfg->dump = NULL;
3158 ioa_cfg->sdt_state = INACTIVE;
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160
3161 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3162 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3163
3164	vfree(dump->ioa_dump.ioa_data);
3165 kfree(dump);
3166 LEAVE;
3167}
3168
3169/**
3170 * ipr_worker_thread - Worker thread
3171 * @work:		ioa config struct
3172 *
3173 * Called at task level from a work thread. This function takes care
3174 * of adding and removing devices from the mid-layer as configuration
3175 * changes are detected by the adapter.
3176 *
3177 * Return value:
3178 * nothing
3179 **/
3180static void ipr_worker_thread(struct work_struct *work)
3181{
3182 unsigned long lock_flags;
3183 struct ipr_resource_entry *res;
3184 struct scsi_device *sdev;
3185 struct ipr_dump *dump;
3186 struct ipr_ioa_cfg *ioa_cfg =
3187 container_of(work, struct ipr_ioa_cfg, work_q);
3188 u8 bus, target, lun;
3189 int did_work;
3190
3191 ENTER;
3192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3193
3194	if (ioa_cfg->sdt_state == READ_DUMP) {
3195 dump = ioa_cfg->dump;
3196 if (!dump) {
3197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3198 return;
3199 }
3200 kref_get(&dump->kref);
3201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3202 ipr_get_ioa_dump(ioa_cfg, dump);
3203 kref_put(&dump->kref, ipr_release_dump);
3204
3205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3206		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3207 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3208 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3209 return;
3210 }
3211
3212restart:
3213 do {
3214 did_work = 0;
3215		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3216		    !ioa_cfg->allow_ml_add_del) {
3217 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3218 return;
3219 }
3220
3221 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3222 if (res->del_from_ml && res->sdev) {
3223 did_work = 1;
3224 sdev = res->sdev;
3225 if (!scsi_device_get(sdev)) {
3226 if (!res->add_to_ml)
3227 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3228 else
3229 res->del_from_ml = 0;
3230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231 scsi_remove_device(sdev);
3232 scsi_device_put(sdev);
3233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3234 }
3235 break;
3236 }
3237 }
203fa3fe 3238 } while (did_work);
1da177e4
LT
3239
3240 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3241 if (res->add_to_ml) {
3e7ebdfa
WB
3242 bus = res->bus;
3243 target = res->target;
3244 lun = res->lun;
1121b794 3245 res->add_to_ml = 0;
1da177e4
LT
3246 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3247 scsi_add_device(ioa_cfg->host, bus, target, lun);
3248 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3249 goto restart;
3250 }
3251 }
3252
3253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3254 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3255 LEAVE;
3256}

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
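
/*
 * Usage sketch (not part of the driver): with CONFIG_SCSI_IPR_TRACE set,
 * the trace buffer can be snapshotted from userspace through the binary
 * sysfs attribute above. "host0" is only an example host number and the
 * exact sysfs path can vary by kernel version:
 *
 *	# cat /sys/class/scsi_host/host0/trace > ipr_trace.bin
 */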

/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name = "fw_version",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes written
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
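
/*
 * Usage sketch (hypothetical host number): the logging level is a plain
 * decimal value, so it can be inspected and raised at runtime, e.g.:
 *
 *	# cat /sys/class/scsi_host/host0/log_level
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 */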

/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name = "run_diagnostics",
		.mode = S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
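
/*
 * Usage sketch (hypothetical host number): diagnostics are triggered by
 * writing to the attribute; a failed write (-EIO) means the adapter
 * logged errors during the diagnostic reset:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */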

/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count, i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
	    !strncmp(buf, "online", 6)) {
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name = "online_state",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
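
/*
 * Usage sketch (hypothetical host number): a dead adapter can be asked
 * to come back online; only the string "online" is acted upon:
 *
 *	# cat /sys/class/scsi_host/host0/online_state
 *	offline
 *	# echo online > /sys/class/scsi_host/host0/online_state
 */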

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
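
/*
 * Usage sketch (hypothetical host number): any write triggers a normal
 * shutdown and reload of the adapter; the write blocks until the
 * reset/reload cycle completes:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 */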

/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
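
/*
 * Worked example of the sizing above (illustrative numbers only,
 * assuming 4 KiB pages and IPR_MAX_SGLIST == 64): for a 900 KiB image,
 * sg_size = 921600 / 63 = 14628 bytes, so order = get_order(14628) = 2
 * and bsize_elem = 4096 * 4 = 16384 bytes per element; 921600 is not a
 * multiple of 16384, so num_elem = 921600 / 16384 + 1 = 57 entries.
 */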

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name = "update_fw",
		.mode = S_IWUSR,
	},
	.store = ipr_store_update_fw
};
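
/*
 * Usage sketch (hypothetical file and host names): the written string is
 * treated as a firmware file name, loaded through request_firmware()
 * from the usual firmware search path (e.g. /lib/firmware):
 *
 *	# echo ipr_ucode.img > /sys/class/scsi_host/host0/update_fw
 */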

/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name = "fw_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes written on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
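
/*
 * Usage sketch (hypothetical host number), assuming CONFIG_SCSI_IPR_DUMP:
 * writing '1' arms dump collection, the binary attribute then yields the
 * dump once it has been obtained, and writing '0' frees the memory:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# cat /sys/class/scsi_host/host0/dump > ipr_dump.bin
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */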

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
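
/*
 * Usage sketch (hypothetical device name): the mid-layer invokes this
 * callback when the per-device queue depth is changed through sysfs,
 * with SATA devices clamped to IPR_MAX_CMD_PER_ATA_LUN:
 *
 *	# echo 16 > /sys/block/sda/device/queue_depth
 */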

/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = "adapter_handle",
		.mode = S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = "resource_path",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name = "device_id",
		.mode = S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name = "resource_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_type
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
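
/*
 * Why 128 heads x 32 sectors keeps partitions 4k-aligned (illustrative
 * arithmetic, assuming 512-byte logical sectors): one cylinder is
 * 128 * 32 = 4096 sectors = 2 MiB, so any partition that starts on a
 * cylinder boundary starts on a 2 MiB boundary, which is always a
 * multiple of 4 KiB.
 */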

/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * __ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	if (!ioa_cfg->in_reset_reload) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (ipr_cmd->scsi_cmd)
					ipr_cmd->done = ipr_scsi_eh_done;
				if (ipr_cmd->qc)
					ipr_cmd->done = ipr_sata_eh_done;
				if (ipr_cmd->qc &&
				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock(&hrrq->_lock);
			list_for_each_entry(ipr_cmd,
					    &hrrq->hrrq_pending_q, queue) {
				if (ipr_cmd->ioarcb.res_handle ==
				    res->res_handle) {
					rc = -EIO;
					break;
				}
			}
			spin_unlock(&hrrq->_lock);
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return rc ? FAILED : SUCCESS;
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
5013
5014/**
5015 * ipr_cancel_op - Cancel specified op
5016 * @scsi_cmd: scsi command struct
5017 *
5018 * This function cancels specified op.
5019 *
5020 * Return value:
5021 * SUCCESS / FAILED
5022 **/
203fa3fe 5023static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5024{
5025 struct ipr_cmnd *ipr_cmd;
5026 struct ipr_ioa_cfg *ioa_cfg;
5027 struct ipr_resource_entry *res;
5028 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5029 u32 ioasc, int_reg;
1da177e4 5030 int op_found = 0;
05a6538a 5031 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5032
5033 ENTER;
5034 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5035 res = scsi_cmd->device->hostdata;
5036
8fa728a2
JG
5037 /* If we are currently going through reset/reload, return failed.
5038 * This will force the mid-layer to call ipr_eh_host_reset,
5039 * which will then go to sleep and wait for the reset to complete
5040 */
56d6aa33 5041 if (ioa_cfg->in_reset_reload ||
5042 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5043 return FAILED;
a92fa25c
KSS
5044 if (!res)
5045 return FAILED;
5046
5047 /*
5048 * If we are aborting a timed out op, chances are that the timeout was caused
 5050 * by an as-yet-undetected EEH error. In such cases, reading a register will
5050 * trigger the EEH recovery infrastructure.
5051 */
5052 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5053
5054 if (!ipr_is_gscsi(res))
1da177e4
LT
5055 return FAILED;
5056
05a6538a 5057 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5058 spin_lock(&hrrq->_lock);
05a6538a 5059 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5060 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5061 ipr_cmd->done = ipr_scsi_eh_done;
5062 op_found = 1;
5063 break;
5064 }
1da177e4 5065 }
56d6aa33 5066 spin_unlock(&hrrq->_lock);
1da177e4
LT
5067 }
5068
5069 if (!op_found)
5070 return SUCCESS;
5071
5072 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5073 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
5074 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5075 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5076 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5077 ipr_cmd->u.sdev = scsi_cmd->device;
5078
fb3ed3cb
BK
5079 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5080 scsi_cmd->cmnd[0]);
1da177e4 5081 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5082 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5083
5084 /*
5085 * If the abort task timed out and we sent a bus reset, we will get
 5086 * one of the following responses to the abort
5087 */
5088 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5089 ioasc = 0;
5090 ipr_trace;
5091 }
5092
05a6538a 5093 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
ee0a90fa
BK
5094 if (!ipr_is_naca_model(res))
5095 res->needs_sync_complete = 1;
1da177e4
LT
5096
5097 LEAVE;
203fa3fe 5098 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
1da177e4
LT
5099}
5100
5101/**
5102 * ipr_eh_abort - Abort a single op
5103 * @scsi_cmd: scsi command struct
5104 *
5105 * Return value:
5106 * SUCCESS / FAILED
5107 **/
203fa3fe 5108static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5109{
8fa728a2
JG
5110 unsigned long flags;
5111 int rc;
1da177e4
LT
5112
5113 ENTER;
1da177e4 5114
8fa728a2
JG
5115 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5116 rc = ipr_cancel_op(scsi_cmd);
5117 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
5118
5119 LEAVE;
8fa728a2 5120 return rc;
1da177e4
LT
5121}
5122
5123/**
5124 * ipr_handle_other_interrupt - Handle "other" interrupts
5125 * @ioa_cfg: ioa config struct
634651fa 5126 * @int_reg: interrupt register
1da177e4
LT
5127 *
5128 * Return value:
5129 * IRQ_NONE / IRQ_HANDLED
5130 **/
634651fa 5131static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5132 u32 int_reg)
1da177e4
LT
5133{
5134 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5135 u32 int_mask_reg;
56d6aa33 5136
7dacb64f
WB
5137 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5138 int_reg &= ~int_mask_reg;
5139
5140 /* If an interrupt on the adapter did not occur, ignore it.
5141 * Or in the case of SIS 64, check for a stage change interrupt.
5142 */
5143 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5144 if (ioa_cfg->sis64) {
5145 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5146 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5147 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5148
5149 /* clear stage change */
5150 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5151 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5152 list_del(&ioa_cfg->reset_cmd->queue);
5153 del_timer(&ioa_cfg->reset_cmd->timer);
5154 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5155 return IRQ_HANDLED;
5156 }
5157 }
5158
5159 return IRQ_NONE;
5160 }
1da177e4
LT
5161
5162 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5163 /* Mask the interrupt */
5164 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5165
5166 /* Clear the interrupt */
5167 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5168 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5169
5170 list_del(&ioa_cfg->reset_cmd->queue);
5171 del_timer(&ioa_cfg->reset_cmd->timer);
5172 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5173 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
7dd21308
BK
5174 if (ioa_cfg->clear_isr) {
5175 if (ipr_debug && printk_ratelimit())
5176 dev_err(&ioa_cfg->pdev->dev,
5177 "Spurious interrupt detected. 0x%08X\n", int_reg);
5178 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5179 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5180 return IRQ_NONE;
5181 }
1da177e4
LT
5182 } else {
5183 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5184 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5185 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5186 dev_err(&ioa_cfg->pdev->dev,
5187 "No Host RRQ. 0x%08X\n", int_reg);
1da177e4
LT
5188 else
5189 dev_err(&ioa_cfg->pdev->dev,
5190 "Permanent IOA failure. 0x%08X\n", int_reg);
5191
5192 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5193 ioa_cfg->sdt_state = GET_DUMP;
5194
5195 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5196 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5197 }
56d6aa33 5198
1da177e4
LT
5199 return rc;
5200}
5201
3feeb89d
WB
5202/**
5203 * ipr_isr_eh - Interrupt service routine error handler
5204 * @ioa_cfg: ioa config struct
 5205 * @msg: message to log
 * @number: number appended to the logged message
5206 *
5207 * Return value:
5208 * none
5209 **/
05a6538a 5210static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
3feeb89d
WB
5211{
5212 ioa_cfg->errors_logged++;
05a6538a 5213 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
3feeb89d
WB
5214
5215 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5216 ioa_cfg->sdt_state = GET_DUMP;
5217
5218 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5219}
5220
05a6538a 5221static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
5222 struct list_head *doneq)
5223{
5224 u32 ioasc;
5225 u16 cmd_index;
5226 struct ipr_cmnd *ipr_cmd;
5227 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5228 int num_hrrq = 0;
5229
5230 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5231 if (!hrr_queue->allow_interrupts)
05a6538a 5232 return 0;
5233
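	/*
	 * Process entries whose toggle bit matches the value we expect.
	 * We flip the expected value each time we wrap the queue, so a
	 * mismatched bit marks the first entry the IOA has not yet written.
	 */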
5234 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5235 hrr_queue->toggle_bit) {
5236
5237 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5238 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5239 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5240
5241 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5242 cmd_index < hrr_queue->min_cmd_id)) {
5243 ipr_isr_eh(ioa_cfg,
5244 "Invalid response handle from IOA: ",
5245 cmd_index);
5246 break;
5247 }
5248
5249 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5250 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5251
5252 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5253
5254 list_move_tail(&ipr_cmd->queue, doneq);
5255
5256 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5257 hrr_queue->hrrq_curr++;
5258 } else {
5259 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5260 hrr_queue->toggle_bit ^= 1u;
5261 }
5262 num_hrrq++;
5263 }
5264 return num_hrrq;
5265}
1da177e4
LT
5266/**
5267 * ipr_isr - Interrupt service routine
5268 * @irq: irq number
 5269 * @devp: pointer to hrr queue struct
1da177e4
LT
5270 *
5271 * Return value:
5272 * IRQ_NONE / IRQ_HANDLED
5273 **/
7d12e780 5274static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5275{
05a6538a 5276 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5277 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5278 unsigned long hrrq_flags = 0;
7dacb64f 5279 u32 int_reg = 0;
1da177e4
LT
5280 u32 ioasc;
5281 u16 cmd_index;
3feeb89d 5282 int num_hrrq = 0;
7dacb64f 5283 int irq_none = 0;
172cd6e1 5284 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5285 irqreturn_t rc = IRQ_NONE;
172cd6e1 5286 LIST_HEAD(doneq);
1da177e4 5287
56d6aa33 5288 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5289 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5290 if (!hrrq->allow_interrupts) {
5291 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4
LT
5292 return IRQ_NONE;
5293 }
5294
1da177e4
LT
5295 while (1) {
5296 ipr_cmd = NULL;
5297
05a6538a 5298 while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5299 hrrq->toggle_bit) {
1da177e4 5300
05a6538a 5301 cmd_index = (be32_to_cpu(*hrrq->hrrq_curr) &
1da177e4
LT
5302 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5303
05a6538a 5304 if (unlikely(cmd_index > hrrq->max_cmd_id ||
5305 cmd_index < hrrq->min_cmd_id)) {
5306 ipr_isr_eh(ioa_cfg,
5307 "Invalid response handle from IOA: ",
5308 cmd_index);
172cd6e1
BK
5309 rc = IRQ_HANDLED;
5310 goto unlock_out;
1da177e4
LT
5311 }
5312
5313 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
96d21f00 5314 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5315
5316 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5317
172cd6e1 5318 list_move_tail(&ipr_cmd->queue, &doneq);
1da177e4
LT
5319
5320 rc = IRQ_HANDLED;
5321
05a6538a 5322 if (hrrq->hrrq_curr < hrrq->hrrq_end) {
5323 hrrq->hrrq_curr++;
1da177e4 5324 } else {
05a6538a 5325 hrrq->hrrq_curr = hrrq->hrrq_start;
5326 hrrq->toggle_bit ^= 1u;
1da177e4
LT
5327 }
5328 }
5329
7dd21308
BK
5330 if (ipr_cmd && !ioa_cfg->clear_isr)
5331 break;
5332
1da177e4
LT
5333 if (ipr_cmd != NULL) {
5334 /* Clear the PCI interrupt */
a5442ba4 5335 num_hrrq = 0;
3feeb89d 5336 do {
214777ba 5337 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5338 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d
WB
5339 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5340 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5341
7dacb64f
WB
5342 } else if (rc == IRQ_NONE && irq_none == 0) {
5343 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5344 irq_none++;
a5442ba4
WB
5345 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5346 int_reg & IPR_PCII_HRRQ_UPDATED) {
05a6538a 5347 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ: ", num_hrrq);
172cd6e1
BK
5348 rc = IRQ_HANDLED;
5349 goto unlock_out;
1da177e4
LT
5350 } else
5351 break;
5352 }
5353
5354 if (unlikely(rc == IRQ_NONE))
634651fa 5355 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5356
172cd6e1 5357unlock_out:
56d6aa33 5358 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
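	/* Complete the harvested ops now that the queue lock is dropped */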
172cd6e1
BK
5359 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5360 list_del(&ipr_cmd->queue);
5361 del_timer(&ipr_cmd->timer);
5362 ipr_cmd->fast_done(ipr_cmd);
5363 }
05a6538a 5364 return rc;
5365}
5366
5367/**
5368 * ipr_isr_mhrrq - Interrupt service routine
5369 * @irq: irq number
 5370 * @devp: pointer to hrr queue struct
5371 *
5372 * Return value:
5373 * IRQ_NONE / IRQ_HANDLED
5374 **/
5375static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5376{
5377 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
56d6aa33 5378 unsigned long hrrq_flags = 0;
05a6538a 5379 struct ipr_cmnd *ipr_cmd, *temp;
5380 irqreturn_t rc = IRQ_NONE;
5381 LIST_HEAD(doneq);
172cd6e1 5382
56d6aa33 5383 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5384
5385 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5386 if (!hrrq->allow_interrupts) {
5387 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5388 return IRQ_NONE;
5389 }
5390
5391 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5392 hrrq->toggle_bit)
5393
5394 if (__ipr_process_hrrq(hrrq, &doneq))
5395 rc = IRQ_HANDLED;
5396
56d6aa33 5397 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5398
5399 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5400 list_del(&ipr_cmd->queue);
5401 del_timer(&ipr_cmd->timer);
5402 ipr_cmd->fast_done(ipr_cmd);
5403 }
1da177e4
LT
5404 return rc;
5405}
5406
a32c055f
WB
5407/**
5408 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5409 * @ioa_cfg: ioa config struct
5410 * @ipr_cmd: ipr command struct
5411 *
5412 * Return value:
5413 * 0 on success / -1 on failure
5414 **/
5415static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5416 struct ipr_cmnd *ipr_cmd)
5417{
5418 int i, nseg;
5419 struct scatterlist *sg;
5420 u32 length;
5421 u32 ioadl_flags = 0;
5422 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5425
5426 length = scsi_bufflen(scsi_cmd);
5427 if (!length)
5428 return 0;
5429
5430 nseg = scsi_dma_map(scsi_cmd);
5431 if (nseg < 0) {
51f52a47
AB
5432 if (printk_ratelimit())
5433 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
a32c055f
WB
5434 return -1;
5435 }
5436
5437 ipr_cmd->dma_use_sg = nseg;
5438
438b0331 5439 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5440 ioarcb->ioadl_len =
5441 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5442
a32c055f
WB
5443 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5444 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5445 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5446 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5447 ioadl_flags = IPR_IOADL_FLAGS_READ;
5448
5449 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5450 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5451 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5452 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5453 }
5454
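	/* Flag the final descriptor so the adapter knows where the list ends */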
5455 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5456 return 0;
5457}
5458
1da177e4
LT
5459/**
5460 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5461 * @ioa_cfg: ioa config struct
5462 * @ipr_cmd: ipr command struct
5463 *
5464 * Return value:
5465 * 0 on success / -1 on failure
5466 **/
5467static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5468 struct ipr_cmnd *ipr_cmd)
5469{
63015bc9
FT
5470 int i, nseg;
5471 struct scatterlist *sg;
1da177e4
LT
5472 u32 length;
5473 u32 ioadl_flags = 0;
5474 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5475 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5476 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5477
63015bc9
FT
5478 length = scsi_bufflen(scsi_cmd);
5479 if (!length)
1da177e4
LT
5480 return 0;
5481
63015bc9
FT
5482 nseg = scsi_dma_map(scsi_cmd);
5483 if (nseg < 0) {
5484 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5485 return -1;
5486 }
51b1c7e1 5487
63015bc9
FT
5488 ipr_cmd->dma_use_sg = nseg;
5489
5490 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5491 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5492 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5493 ioarcb->data_transfer_length = cpu_to_be32(length);
5494 ioarcb->ioadl_len =
63015bc9
FT
5495 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5496 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5497 ioadl_flags = IPR_IOADL_FLAGS_READ;
5498 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5499 ioarcb->read_ioadl_len =
5500 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5501 }
1da177e4 5502
a32c055f
WB
5503 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5504 ioadl = ioarcb->u.add_data.u.ioadl;
5505 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5506 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5507 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5508 }
1da177e4 5509
63015bc9
FT
5510 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5511 ioadl[i].flags_and_data_len =
5512 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5513 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5514 }
5515
63015bc9
FT
5516 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5517 return 0;
1da177e4
LT
5518}
5519
5520/**
5521 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5522 * @scsi_cmd: scsi command struct
5523 *
5524 * Return value:
5525 * task attributes
5526 **/
5527static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5528{
5529 u8 tag[2];
5530 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5531
5532 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5533 switch (tag[0]) {
5534 case MSG_SIMPLE_TAG:
5535 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5536 break;
5537 case MSG_HEAD_TAG:
5538 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5539 break;
5540 case MSG_ORDERED_TAG:
5541 rc = IPR_FLAGS_LO_ORDERED_TASK;
5542 break;
 5543 }
5544 }
5545
5546 return rc;
5547}
5548
5549/**
5550 * ipr_erp_done - Process completion of ERP for a device
5551 * @ipr_cmd: ipr command struct
5552 *
5553 * This function copies the sense buffer into the scsi_cmd
 5554 * struct and invokes the scsi_done function.
5555 *
5556 * Return value:
5557 * nothing
5558 **/
5559static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5560{
5561 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5562 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5563 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5564
5565 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5566 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5567 scmd_printk(KERN_ERR, scsi_cmd,
5568 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5569 } else {
5570 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5571 SCSI_SENSE_BUFFERSIZE);
5572 }
5573
5574 if (res) {
ee0a90fa
BK
5575 if (!ipr_is_naca_model(res))
5576 res->needs_sync_complete = 1;
1da177e4
LT
5577 res->in_erp = 0;
5578 }
63015bc9 5579 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5580 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5581 scsi_cmd->scsi_done(scsi_cmd);
5582}
5583
5584/**
5585 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5586 * @ipr_cmd: ipr command struct
5587 *
5588 * Return value:
5589 * none
5590 **/
5591static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5592{
51b1c7e1 5593 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5594 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5595 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5596
5597 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5598 ioarcb->data_transfer_length = 0;
1da177e4 5599 ioarcb->read_data_transfer_length = 0;
a32c055f 5600 ioarcb->ioadl_len = 0;
1da177e4 5601 ioarcb->read_ioadl_len = 0;
96d21f00
WB
5602 ioasa->hdr.ioasc = 0;
5603 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
5604
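	/*
	 * Point the IOARCB back at the command's embedded IOADL, in
	 * either its 64-bit or 32-bit form as the adapter requires.
	 */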
5605 if (ipr_cmd->ioa_cfg->sis64)
5606 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5607 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5608 else {
5609 ioarcb->write_ioadl_addr =
5610 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5611 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5612 }
1da177e4
LT
5613}
5614
5615/**
5616 * ipr_erp_request_sense - Send request sense to a device
5617 * @ipr_cmd: ipr command struct
5618 *
5619 * This function sends a request sense to a device as a result
5620 * of a check condition.
5621 *
5622 * Return value:
5623 * nothing
5624 **/
5625static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5626{
5627 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5628 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5629
5630 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5631 ipr_erp_done(ipr_cmd);
5632 return;
5633 }
5634
5635 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5636
5637 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5638 cmd_pkt->cdb[0] = REQUEST_SENSE;
5639 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5640 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5641 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5642 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5643
a32c055f
WB
5644 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5645 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5646
5647 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5648 IPR_REQUEST_SENSE_TIMEOUT * 2);
5649}
5650
5651/**
5652 * ipr_erp_cancel_all - Send cancel all to a device
5653 * @ipr_cmd: ipr command struct
5654 *
5655 * This function sends a cancel all to a device to clear the
5656 * queue. If we are running TCQ on the device, QERR is set to 1,
5657 * which means all outstanding ops have been dropped on the floor.
5658 * Cancel all will return them to us.
5659 *
5660 * Return value:
5661 * nothing
5662 **/
5663static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5664{
5665 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5666 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5667 struct ipr_cmd_pkt *cmd_pkt;
5668
5669 res->in_erp = 1;
5670
5671 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5672
5673 if (!scsi_get_tag_type(scsi_cmd->device)) {
5674 ipr_erp_request_sense(ipr_cmd);
5675 return;
5676 }
5677
5678 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5679 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5680 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5681
5682 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5683 IPR_CANCEL_ALL_TIMEOUT);
5684}
5685
5686/**
5687 * ipr_dump_ioasa - Dump contents of IOASA
5688 * @ioa_cfg: ioa config struct
5689 * @ipr_cmd: ipr command struct
fe964d0a 5690 * @res: resource entry struct
1da177e4
LT
5691 *
5692 * This function is invoked by the interrupt handler when ops
5693 * fail. It will log the IOASA if appropriate. Only called
5694 * for GPDD ops.
5695 *
5696 * Return value:
5697 * none
5698 **/
5699static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5700 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5701{
5702 int i;
5703 u16 data_len;
b0692dd4 5704 u32 ioasc, fd_ioasc;
96d21f00 5705 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
5706 __be32 *ioasa_data = (__be32 *)ioasa;
5707 int error_index;
5708
96d21f00
WB
5709 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5710 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5711
5712 if (0 == ioasc)
5713 return;
5714
5715 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5716 return;
5717
b0692dd4
BK
5718 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5719 error_index = ipr_get_error(fd_ioasc);
5720 else
5721 error_index = ipr_get_error(ioasc);
1da177e4
LT
5722
5723 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5724 /* Don't log an error if the IOA already logged one */
96d21f00 5725 if (ioasa->hdr.ilid != 0)
1da177e4
LT
5726 return;
5727
cc9bd5d4
BK
5728 if (!ipr_is_gscsi(res))
5729 return;
5730
1da177e4
LT
5731 if (ipr_error_table[error_index].log_ioasa == 0)
5732 return;
5733 }
5734
fe964d0a 5735 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5736
96d21f00
WB
5737 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5738 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5739 data_len = sizeof(struct ipr_ioasa64);
5740 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5741 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
5742
5743 ipr_err("IOASA Dump:\n");
5744
5745 for (i = 0; i < data_len / 4; i += 4) {
5746 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5747 be32_to_cpu(ioasa_data[i]),
5748 be32_to_cpu(ioasa_data[i+1]),
5749 be32_to_cpu(ioasa_data[i+2]),
5750 be32_to_cpu(ioasa_data[i+3]));
5751 }
5752}
5753
5754/**
5755 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 5756 * @ipr_cmd: ipr command struct
5758 *
5759 * Return value:
5760 * none
5761 **/
5762static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5763{
5764 u32 failing_lba;
5765 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5766 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
5767 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5768 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
5769
5770 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5771
5772 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5773 return;
5774
5775 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5776
5777 if (ipr_is_vset_device(res) &&
5778 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5779 ioasa->u.vset.failing_lba_hi != 0) {
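		/*
		 * The failing LBA will not fit in 32 bits, so build
		 * descriptor format (0x72) sense data and carry the
		 * 64-bit LBA in an information descriptor.
		 */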
5780 sense_buf[0] = 0x72;
5781 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5782 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5783 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5784
5785 sense_buf[7] = 12;
5786 sense_buf[8] = 0;
5787 sense_buf[9] = 0x0A;
5788 sense_buf[10] = 0x80;
5789
5790 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5791
5792 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5793 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5794 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5795 sense_buf[15] = failing_lba & 0x000000ff;
5796
5797 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5798
5799 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5800 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5801 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5802 sense_buf[19] = failing_lba & 0x000000ff;
5803 } else {
5804 sense_buf[0] = 0x70;
5805 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5806 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5807 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5808
5809 /* Illegal request */
5810 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 5811 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
5812 sense_buf[7] = 10; /* additional length */
5813
5814 /* IOARCB was in error */
5815 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5816 sense_buf[15] = 0xC0;
5817 else /* Parameter data was invalid */
5818 sense_buf[15] = 0x80;
5819
5820 sense_buf[16] =
5821 ((IPR_FIELD_POINTER_MASK &
96d21f00 5822 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
5823 sense_buf[17] =
5824 (IPR_FIELD_POINTER_MASK &
96d21f00 5825 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
5826 } else {
5827 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5828 if (ipr_is_vset_device(res))
5829 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5830 else
5831 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5832
5833 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5834 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5835 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5836 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5837 sense_buf[6] = failing_lba & 0x000000ff;
5838 }
5839
5840 sense_buf[7] = 6; /* additional length */
5841 }
5842 }
5843}
5844
ee0a90fa
BK
5845/**
5846 * ipr_get_autosense - Copy autosense data to sense buffer
5847 * @ipr_cmd: ipr command struct
5848 *
5849 * This function copies the autosense buffer to the buffer
5850 * in the scsi_cmd, if there is autosense available.
5851 *
5852 * Return value:
5853 * 1 if autosense was available / 0 if not
5854 **/
5855static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5856{
96d21f00
WB
5857 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5858 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 5859
96d21f00 5860 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
5861 return 0;
5862
96d21f00
WB
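	/* SIS64 adapters report autosense through the 64-bit IOASA layout */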
5863 if (ipr_cmd->ioa_cfg->sis64)
5864 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5865 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5866 SCSI_SENSE_BUFFERSIZE));
5867 else
5868 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5869 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5870 SCSI_SENSE_BUFFERSIZE));
ee0a90fa
BK
5871 return 1;
5872}
5873
1da177e4
LT
5874/**
5875 * ipr_erp_start - Process an error response for a SCSI op
5876 * @ioa_cfg: ioa config struct
5877 * @ipr_cmd: ipr command struct
5878 *
5879 * This function determines whether or not to initiate ERP
5880 * on the affected device.
5881 *
5882 * Return value:
5883 * nothing
5884 **/
5885static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5886 struct ipr_cmnd *ipr_cmd)
5887{
5888 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5889 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5890 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 5891 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5892
5893 if (!res) {
5894 ipr_scsi_eh_done(ipr_cmd);
5895 return;
5896 }
5897
8a048994 5898 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
5899 ipr_gen_sense(ipr_cmd);
5900
cc9bd5d4
BK
5901 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5902
8a048994 5903 switch (masked_ioasc) {
1da177e4 5904 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
5905 if (ipr_is_naca_model(res))
5906 scsi_cmd->result |= (DID_ABORT << 16);
5907 else
5908 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
5909 break;
5910 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 5911 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
5912 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5913 break;
5914 case IPR_IOASC_HW_SEL_TIMEOUT:
5915 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
5916 if (!ipr_is_naca_model(res))
5917 res->needs_sync_complete = 1;
1da177e4
LT
5918 break;
5919 case IPR_IOASC_SYNC_REQUIRED:
5920 if (!res->in_erp)
5921 res->needs_sync_complete = 1;
5922 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5923 break;
5924 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 5925 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
5926 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5927 break;
5928 case IPR_IOASC_BUS_WAS_RESET:
5929 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5930 /*
5931 * Report the bus reset and ask for a retry. The device
 5932 * will give a CC/UA on the next command.
5933 */
5934 if (!res->resetting_device)
5935 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5936 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
5937 if (!ipr_is_naca_model(res))
5938 res->needs_sync_complete = 1;
1da177e4
LT
5939 break;
5940 case IPR_IOASC_HW_DEV_BUS_STATUS:
5941 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5942 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
5943 if (!ipr_get_autosense(ipr_cmd)) {
5944 if (!ipr_is_naca_model(res)) {
5945 ipr_erp_cancel_all(ipr_cmd);
5946 return;
5947 }
5948 }
1da177e4 5949 }
ee0a90fa
BK
5950 if (!ipr_is_naca_model(res))
5951 res->needs_sync_complete = 1;
1da177e4
LT
5952 break;
5953 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5954 break;
5955 default:
5b7304fb
BK
5956 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5957 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5958 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
5959 res->needs_sync_complete = 1;
5960 break;
5961 }
5962
63015bc9 5963 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5964 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5965 scsi_cmd->scsi_done(scsi_cmd);
5966}
5967
5968/**
5969 * ipr_scsi_done - mid-layer done function
5970 * @ipr_cmd: ipr command struct
5971 *
5972 * This function is invoked by the interrupt handler for
5973 * ops generated by the SCSI mid-layer
5974 *
5975 * Return value:
5976 * none
5977 **/
5978static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5979{
5980 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5981 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 5982 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 5983 unsigned long hrrq_flags;
1da177e4 5984
96d21f00 5985 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
5986
5987 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
172cd6e1
BK
5988 scsi_dma_unmap(scsi_cmd);
5989
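		/*
		 * Fast path completion needs only the per-HRRQ lock,
		 * keeping host_lock contention out of the normal I/O path.
		 */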
56d6aa33 5990 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
05a6538a 5991 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 5992 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 5993 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 5994 } else {
56d6aa33 5995 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
1da177e4 5996 ipr_erp_start(ioa_cfg, ipr_cmd);
56d6aa33 5997 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 5998 }
1da177e4
LT
5999}
6000
1da177e4
LT
6001/**
6002 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6003 * @shost: scsi host struct
1da177e4 6004 * @scsi_cmd: scsi command struct
1da177e4
LT
6005 *
6006 * This function queues a request generated by the mid-layer.
6007 *
6008 * Return value:
6009 * 0 on success
6010 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6011 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6012 **/
00bfef2c
BK
6013static int ipr_queuecommand(struct Scsi_Host *shost,
6014 struct scsi_cmnd *scsi_cmd)
1da177e4
LT
6015{
6016 struct ipr_ioa_cfg *ioa_cfg;
6017 struct ipr_resource_entry *res;
6018 struct ipr_ioarcb *ioarcb;
6019 struct ipr_cmnd *ipr_cmd;
56d6aa33 6020 unsigned long hrrq_flags, lock_flags;
d12f1576 6021 int rc;
05a6538a 6022 struct ipr_hrr_queue *hrrq;
6023 int hrrq_id;
1da177e4 6024
00bfef2c
BK
6025 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6026
1da177e4 6027 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6028 res = scsi_cmd->device->hostdata;
56d6aa33 6029
6030 if (ipr_is_gata(res) && res->sata_port) {
6031 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6032 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6033 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6034 return rc;
6035 }
6036
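	/*
	 * Spread submissions across the host RRQs; each queue has its
	 * own lock and its own free/pending lists.
	 */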
05a6538a 6037 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6038 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6039
56d6aa33 6040 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4
LT
6041 /*
6042 * We are currently blocking all devices due to a host reset
6043 * We have told the host to stop giving us new requests, but
6044 * ERP ops don't count. FIXME
6045 */
56d6aa33 6046 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
6047 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6048 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6049 }
1da177e4
LT
6050
6051 /*
6052 * FIXME - Create scsi_set_host_offline interface
6053 * and the ioa_is_dead check can be removed
6054 */
56d6aa33 6055 if (unlikely(hrrq->ioa_is_dead || !res)) {
6056 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6057 goto err_nodev;
1da177e4
LT
6058 }
6059
05a6538a 6060 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6061 if (ipr_cmd == NULL) {
56d6aa33 6062 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6063 return SCSI_MLQUEUE_HOST_BUSY;
6064 }
56d6aa33 6065 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6066
172cd6e1 6067 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6068 ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6069
6070 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6071 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6072 ipr_cmd->done = ipr_scsi_eh_done;
1da177e4
LT
6073
6074 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6075 if (scsi_cmd->underflow == 0)
6076 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6077
1da177e4 6078 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
ab6c10b1
WB
6079 if (ipr_is_gscsi(res))
6080 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
1da177e4
LT
6081 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6082 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6083 }
6084
6085 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6086 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6087 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6088 }
1da177e4 6089
d12f1576
DC
6090 if (ioa_cfg->sis64)
6091 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6092 else
6093 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6094
56d6aa33 6095 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6096 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6097 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6098 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6099 if (!rc)
6100 scsi_dma_unmap(scsi_cmd);
a5fb407e 6101 return SCSI_MLQUEUE_HOST_BUSY;
1da177e4
LT
6102 }
6103
56d6aa33 6104 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6105 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6106 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6107 scsi_dma_unmap(scsi_cmd);
6108 goto err_nodev;
6109 }
6110
6111 ioarcb->res_handle = res->res_handle;
6112 if (res->needs_sync_complete) {
6113 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6114 res->needs_sync_complete = 0;
6115 }
05a6538a 6116 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6117 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6118 ipr_send_command(ipr_cmd);
56d6aa33 6119 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6120 return 0;
1da177e4 6121
00bfef2c 6122err_nodev:
56d6aa33 6123 spin_lock_irqsave(hrrq->lock, hrrq_flags);
00bfef2c
BK
6124 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6125 scsi_cmd->result = (DID_NO_CONNECT << 16);
6126 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6127 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6128 return 0;
6129}
f281233d 6130
35a39691
BK
6131/**
6132 * ipr_ioctl - IOCTL handler
6133 * @sdev: scsi device struct
6134 * @cmd: IOCTL cmd
6135 * @arg: IOCTL arg
6136 *
6137 * Return value:
6138 * 0 on success / other on failure
6139 **/
bd705f2d 6140static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
6141{
6142 struct ipr_resource_entry *res;
6143
6144 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
6145 if (res && ipr_is_gata(res)) {
6146 if (cmd == HDIO_GET_IDENTITY)
6147 return -ENOTTY;
94be9a58 6148 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6149 }
35a39691
BK
6150
6151 return -EINVAL;
6152}
6153
1da177e4
LT
6154/**
 6155 * ipr_ioa_info - Get information about the card/driver
 6156 * @host: scsi host struct
6157 *
6158 * Return value:
6159 * pointer to buffer with description string
6160 **/
203fa3fe 6161static const char *ipr_ioa_info(struct Scsi_Host *host)
1da177e4
LT
6162{
6163 static char buffer[512];
6164 struct ipr_ioa_cfg *ioa_cfg;
6165 unsigned long lock_flags = 0;
6166
6167 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6168
6169 spin_lock_irqsave(host->host_lock, lock_flags);
6170 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6171 spin_unlock_irqrestore(host->host_lock, lock_flags);
6172
6173 return buffer;
6174}
6175
6176static struct scsi_host_template driver_template = {
6177 .module = THIS_MODULE,
6178 .name = "IPR",
6179 .info = ipr_ioa_info,
35a39691 6180 .ioctl = ipr_ioctl,
1da177e4
LT
6181 .queuecommand = ipr_queuecommand,
6182 .eh_abort_handler = ipr_eh_abort,
6183 .eh_device_reset_handler = ipr_eh_dev_reset,
6184 .eh_host_reset_handler = ipr_eh_host_reset,
6185 .slave_alloc = ipr_slave_alloc,
6186 .slave_configure = ipr_slave_configure,
6187 .slave_destroy = ipr_slave_destroy,
35a39691
BK
6188 .target_alloc = ipr_target_alloc,
6189 .target_destroy = ipr_target_destroy,
1da177e4
LT
6190 .change_queue_depth = ipr_change_queue_depth,
6191 .change_queue_type = ipr_change_queue_type,
6192 .bios_param = ipr_biosparam,
6193 .can_queue = IPR_MAX_COMMANDS,
6194 .this_id = -1,
6195 .sg_tablesize = IPR_MAX_SGLIST,
6196 .max_sectors = IPR_IOA_MAX_SECTORS,
6197 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6198 .use_clustering = ENABLE_CLUSTERING,
6199 .shost_attrs = ipr_ioa_attrs,
6200 .sdev_attrs = ipr_dev_attrs,
6201 .proc_name = IPR_NAME
6202};
6203
35a39691
BK
6204/**
6205 * ipr_ata_phy_reset - libata phy_reset handler
6206 * @ap: ata port to reset
6207 *
6208 **/
6209static void ipr_ata_phy_reset(struct ata_port *ap)
6210{
6211 unsigned long flags;
6212 struct ipr_sata_port *sata_port = ap->private_data;
6213 struct ipr_resource_entry *res = sata_port->res;
6214 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6215 int rc;
6216
6217 ENTER;
6218 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6219 while (ioa_cfg->in_reset_reload) {
35a39691
BK
6220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6221 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6222 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6223 }
6224
56d6aa33 6225 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
35a39691
BK
6226 goto out_unlock;
6227
6228 rc = ipr_device_reset(ioa_cfg, res);
6229
6230 if (rc) {
3e4ec344 6231 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6232 goto out_unlock;
6233 }
6234
3e7ebdfa
WB
6235 ap->link.device[0].class = res->ata_class;
6236 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6237 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6238
6239out_unlock:
6240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6241 LEAVE;
6242}
6243
6244/**
6245 * ipr_ata_post_internal - Cleanup after an internal command
6246 * @qc: ATA queued command
6247 *
6248 * Return value:
6249 * none
6250 **/
6251static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6252{
6253 struct ipr_sata_port *sata_port = qc->ap->private_data;
6254 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6255 struct ipr_cmnd *ipr_cmd;
05a6538a 6256 struct ipr_hrr_queue *hrrq;
35a39691
BK
6257 unsigned long flags;
6258
6259 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6260 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
6261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6262 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6263 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6264 }
6265
05a6538a 6266 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6267 spin_lock(&hrrq->_lock);
05a6538a 6268 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6269 if (ipr_cmd->qc == qc) {
6270 ipr_device_reset(ioa_cfg, sata_port->res);
6271 break;
6272 }
35a39691 6273 }
56d6aa33 6274 spin_unlock(&hrrq->_lock);
35a39691
BK
6275 }
6276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6277}
6278
35a39691
BK
6279/**
6280 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6281 * @regs: destination
6282 * @tf: source ATA taskfile
6283 *
6284 * Return value:
6285 * none
6286 **/
6287static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6288 struct ata_taskfile *tf)
6289{
6290 regs->feature = tf->feature;
6291 regs->nsect = tf->nsect;
6292 regs->lbal = tf->lbal;
6293 regs->lbam = tf->lbam;
6294 regs->lbah = tf->lbah;
6295 regs->device = tf->device;
6296 regs->command = tf->command;
6297 regs->hob_feature = tf->hob_feature;
6298 regs->hob_nsect = tf->hob_nsect;
6299 regs->hob_lbal = tf->hob_lbal;
6300 regs->hob_lbam = tf->hob_lbam;
6301 regs->hob_lbah = tf->hob_lbah;
6302 regs->ctl = tf->ctl;
6303}
6304
6305/**
6306 * ipr_sata_done - done function for SATA commands
6307 * @ipr_cmd: ipr command struct
6308 *
6309 * This function is invoked by the interrupt handler for
6310 * ops generated by the SCSI mid-layer to SATA devices
6311 *
6312 * Return value:
6313 * none
6314 **/
6315static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6316{
6317 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6318 struct ata_queued_cmd *qc = ipr_cmd->qc;
6319 struct ipr_sata_port *sata_port = qc->ap->private_data;
6320 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6321 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6322
56d6aa33 6323 spin_lock(&ipr_cmd->hrrq->_lock);
96d21f00
WB
6324 if (ipr_cmd->ioa_cfg->sis64)
6325 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6326 sizeof(struct ipr_ioasa_gata));
6327 else
6328 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6329 sizeof(struct ipr_ioasa_gata));
35a39691
BK
6330 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6331
96d21f00 6332 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6333 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
6334
6335 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6336 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6337 else
96d21f00 6338 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6339 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6340 spin_unlock(&ipr_cmd->hrrq->_lock);
35a39691
BK
6341 ata_qc_complete(qc);
6342}
6343
a32c055f
WB
6344/**
6345 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6346 * @ipr_cmd: ipr command struct
6347 * @qc: ATA queued command
6348 *
6349 **/
6350static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6351 struct ata_queued_cmd *qc)
6352{
6353 u32 ioadl_flags = 0;
6354 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6355 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6356 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6357 int len = qc->nbytes;
6358 struct scatterlist *sg;
6359 unsigned int si;
6360 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6361
6362 if (len == 0)
6363 return;
6364
6365 if (qc->dma_dir == DMA_TO_DEVICE) {
6366 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6367 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6368 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6369 ioadl_flags = IPR_IOADL_FLAGS_READ;
6370
6371 ioarcb->data_transfer_length = cpu_to_be32(len);
6372 ioarcb->ioadl_len =
6373 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6374 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6375 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6376
6377 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6378 ioadl64->flags = cpu_to_be32(ioadl_flags);
6379 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6380 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6381
6382 last_ioadl64 = ioadl64;
6383 ioadl64++;
6384 }
6385
6386 if (likely(last_ioadl64))
6387 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6388}
6389
35a39691
BK
6390/**
6391 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6392 * @ipr_cmd: ipr command struct
6393 * @qc: ATA queued command
6394 *
6395 **/
6396static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6397 struct ata_queued_cmd *qc)
6398{
6399 u32 ioadl_flags = 0;
6400 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6401 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6402 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6403 int len = qc->nbytes;
35a39691 6404 struct scatterlist *sg;
ff2aeb1e 6405 unsigned int si;
35a39691
BK
6406
6407 if (len == 0)
6408 return;
6409
6410 if (qc->dma_dir == DMA_TO_DEVICE) {
6411 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6412 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6413 ioarcb->data_transfer_length = cpu_to_be32(len);
6414 ioarcb->ioadl_len =
35a39691
BK
6415 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6416 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6417 ioadl_flags = IPR_IOADL_FLAGS_READ;
6418 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6419 ioarcb->read_ioadl_len =
6420 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6421 }
6422
ff2aeb1e 6423 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
6424 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6425 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
6426
6427 last_ioadl = ioadl;
6428 ioadl++;
35a39691 6429 }
3be6cbd7
JG
6430
6431 if (likely(last_ioadl))
6432 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
6433}
6434
56d6aa33 6435/**
6436 * ipr_qc_defer - Get a free ipr_cmd
6437 * @qc: queued command
6438 *
6439 * Return value:
 6440 * 0 on success / ATA_DEFER_LINK to defer the command
6441 **/
6442static int ipr_qc_defer(struct ata_queued_cmd *qc)
6443{
6444 struct ata_port *ap = qc->ap;
6445 struct ipr_sata_port *sata_port = ap->private_data;
6446 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6447 struct ipr_cmnd *ipr_cmd;
6448 struct ipr_hrr_queue *hrrq;
6449 int hrrq_id;
6450
6451 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6452 hrrq = &ioa_cfg->hrrq[hrrq_id];
6453
6454 qc->lldd_task = NULL;
6455 spin_lock(&hrrq->_lock);
6456 if (unlikely(hrrq->ioa_is_dead)) {
6457 spin_unlock(&hrrq->_lock);
6458 return 0;
6459 }
6460
6461 if (unlikely(!hrrq->allow_cmds)) {
6462 spin_unlock(&hrrq->_lock);
6463 return ATA_DEFER_LINK;
6464 }
6465
6466 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6467 if (ipr_cmd == NULL) {
6468 spin_unlock(&hrrq->_lock);
6469 return ATA_DEFER_LINK;
6470 }
6471
6472 qc->lldd_task = ipr_cmd;
6473 spin_unlock(&hrrq->_lock);
6474 return 0;
6475}
6476
35a39691
BK
6477/**
6478 * ipr_qc_issue - Issue a SATA qc to a device
6479 * @qc: queued command
6480 *
6481 * Return value:
 6482 * 0 on success / AC_ERR_* on failure
6483 **/
6484static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6485{
6486 struct ata_port *ap = qc->ap;
6487 struct ipr_sata_port *sata_port = ap->private_data;
6488 struct ipr_resource_entry *res = sata_port->res;
6489 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6490 struct ipr_cmnd *ipr_cmd;
6491 struct ipr_ioarcb *ioarcb;
6492 struct ipr_ioarcb_ata_regs *regs;
6493
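	/*
	 * ipr_qc_defer normally stashes a free command block in
	 * qc->lldd_task; retry the allocation here in case issue was
	 * reached without a successful defer.
	 */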
56d6aa33 6494 if (qc->lldd_task == NULL)
6495 ipr_qc_defer(qc);
6496
6497 ipr_cmd = qc->lldd_task;
6498 if (ipr_cmd == NULL)
0feeed82 6499 return AC_ERR_SYSTEM;
35a39691 6500
56d6aa33 6501 qc->lldd_task = NULL;
6502 spin_lock(&ipr_cmd->hrrq->_lock);
6503 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6504 ipr_cmd->hrrq->ioa_is_dead)) {
6505 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6506 spin_unlock(&ipr_cmd->hrrq->_lock);
6507 return AC_ERR_SYSTEM;
6508 }
6509
05a6538a 6510 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 6511 ioarcb = &ipr_cmd->ioarcb;
35a39691 6512
a32c055f
WB
6513 if (ioa_cfg->sis64) {
6514 regs = &ipr_cmd->i.ata_ioadl.regs;
6515 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6516 } else
6517 regs = &ioarcb->u.add_data.u.regs;
6518
6519 memset(regs, 0, sizeof(*regs));
6520 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 6521
56d6aa33 6522 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
6523 ipr_cmd->qc = qc;
6524 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6525 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6526 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6527 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6528 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6529 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6530
a32c055f
WB
6531 if (ioa_cfg->sis64)
6532 ipr_build_ata_ioadl64(ipr_cmd, qc);
6533 else
6534 ipr_build_ata_ioadl(ipr_cmd, qc);
6535
35a39691
BK
6536 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6537 ipr_copy_sata_tf(regs, &qc->tf);
6538 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6539 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6540
6541 switch (qc->tf.protocol) {
6542 case ATA_PROT_NODATA:
6543 case ATA_PROT_PIO:
6544 break;
6545
6546 case ATA_PROT_DMA:
6547 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6548 break;
6549
0dc36888
TH
6550 case ATAPI_PROT_PIO:
6551 case ATAPI_PROT_NODATA:
35a39691
BK
6552 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6553 break;
6554
0dc36888 6555 case ATAPI_PROT_DMA:
35a39691
BK
6556 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6557 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6558 break;
6559
6560 default:
6561 WARN_ON(1);
56d6aa33 6562 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 6563 return AC_ERR_INVALID;
35a39691
BK
6564 }
6565
a32c055f 6566 ipr_send_command(ipr_cmd);
56d6aa33 6567 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 6568
35a39691
BK
6569 return 0;
6570}
6571
4c9bf4e7
TH
6572/**
6573 * ipr_qc_fill_rtf - Read result TF
6574 * @qc: ATA queued command
6575 *
6576 * Return value:
6577 * true
6578 **/
6579static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6580{
6581 struct ipr_sata_port *sata_port = qc->ap->private_data;
6582 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6583 struct ata_taskfile *tf = &qc->result_tf;
6584
6585 tf->feature = g->error;
6586 tf->nsect = g->nsect;
6587 tf->lbal = g->lbal;
6588 tf->lbam = g->lbam;
6589 tf->lbah = g->lbah;
6590 tf->device = g->device;
6591 tf->command = g->status;
6592 tf->hob_nsect = g->hob_nsect;
6593 tf->hob_lbal = g->hob_lbal;
6594 tf->hob_lbam = g->hob_lbam;
6595 tf->hob_lbah = g->hob_lbah;
6596 tf->ctl = g->alt_status;
6597
6598 return true;
6599}
6600
35a39691 6601static struct ata_port_operations ipr_sata_ops = {
35a39691 6602 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6603 .hardreset = ipr_sata_reset,
35a39691 6604 .post_internal_cmd = ipr_ata_post_internal,
35a39691 6605 .qc_prep = ata_noop_qc_prep,
56d6aa33 6606 .qc_defer = ipr_qc_defer,
35a39691 6607 .qc_issue = ipr_qc_issue,
4c9bf4e7 6608 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6609 .port_start = ata_sas_port_start,
6610 .port_stop = ata_sas_port_stop
6611};
6612
6613static struct ata_port_info sata_port_info = {
9cbe056f 6614 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
0f2e0330
SS
6615 .pio_mask = ATA_PIO4_ONLY,
6616 .mwdma_mask = ATA_MWDMA2,
6617 .udma_mask = ATA_UDMA6,
35a39691
BK
6618 .port_ops = &ipr_sata_ops
6619};
6620
1da177e4
LT
6621#ifdef CONFIG_PPC_PSERIES
6622static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
6623 PVR_NORTHSTAR,
6624 PVR_PULSAR,
6625 PVR_POWER4,
6626 PVR_ICESTAR,
6627 PVR_SSTAR,
6628 PVR_POWER4p,
6629 PVR_630,
6630 PVR_630p
1da177e4
LT
6631};
6632
6633/**
6634 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6635 * @ioa_cfg: ioa cfg struct
6636 *
6637 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6638 * certain pSeries hardware. This function determines if the given
 6639 * adapter is in one of these configurations or not.
6640 *
6641 * Return value:
6642 * 1 if adapter is not supported / 0 if adapter is supported
6643 **/
6644static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6645{
1da177e4
LT
6646 int i;
6647
44c10138 6648 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6649 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6650 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6651 return 1;
1da177e4
LT
6652 }
6653 }
6654 return 0;
6655}
6656#else
6657#define ipr_invalid_adapter(ioa_cfg) 0
6658#endif
6659
6660/**
6661 * ipr_ioa_bringdown_done - IOA bring down completion.
6662 * @ipr_cmd: ipr command struct
6663 *
6664 * This function processes the completion of an adapter bring down.
6665 * It wakes any reset sleepers.
6666 *
6667 * Return value:
6668 * IPR_RC_JOB_RETURN
6669 **/
6670static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6671{
6672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6673
6674 ENTER;
6675 ioa_cfg->in_reset_reload = 0;
6676 ioa_cfg->reset_retries = 0;
05a6538a 6677 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6678 wake_up_all(&ioa_cfg->reset_wait_q);
6679
6680 spin_unlock_irq(ioa_cfg->host->host_lock);
6681 scsi_unblock_requests(ioa_cfg->host);
6682 spin_lock_irq(ioa_cfg->host->host_lock);
6683 LEAVE;
6684
6685 return IPR_RC_JOB_RETURN;
6686}
6687
6688/**
6689 * ipr_ioa_reset_done - IOA reset completion.
6690 * @ipr_cmd: ipr command struct
6691 *
6692 * This function processes the completion of an adapter reset.
6693 * It schedules any necessary mid-layer add/removes and
6694 * wakes any reset sleepers.
6695 *
6696 * Return value:
6697 * IPR_RC_JOB_RETURN
6698 **/
6699static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6700{
6701 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6702 struct ipr_resource_entry *res;
6703 struct ipr_hostrcb *hostrcb, *temp;
56d6aa33 6704 int i = 0, j;
1da177e4
LT
6705
6706 ENTER;
6707 ioa_cfg->in_reset_reload = 0;
56d6aa33 6708 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6709 spin_lock(&ioa_cfg->hrrq[j]._lock);
6710 ioa_cfg->hrrq[j].allow_cmds = 1;
6711 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6712 }
6713 wmb();
1da177e4 6714 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6715 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6716
6717 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6718 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6719 ipr_trace;
6720 break;
6721 }
6722 }
6723 schedule_work(&ioa_cfg->work_q);
6724
6725 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6726 list_del(&hostrcb->queue);
6727 if (i++ < IPR_NUM_LOG_HCAMS)
6728 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6729 else
6730 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6731 }
6732
6bb04170 6733 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6734 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6735
6736 ioa_cfg->reset_retries = 0;
05a6538a 6737 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6738 wake_up_all(&ioa_cfg->reset_wait_q);
6739
30237853 6740 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6741 scsi_unblock_requests(ioa_cfg->host);
30237853 6742 spin_lock(ioa_cfg->host->host_lock);
1da177e4 6743
56d6aa33 6744 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
1da177e4
LT
6745 scsi_block_requests(ioa_cfg->host);
6746
6747 LEAVE;
6748 return IPR_RC_JOB_RETURN;
6749}
6750
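/*
 * Illustrative sketch, not driver code: after a reset the loop above
 * re-arms every free Host Controlled Async Message (HCAM) buffer,
 * posting the first IPR_NUM_LOG_HCAMS of them for error-log data and
 * the rest for configuration-change notifications. The split reduces
 * to the classifier below (num_log stands in for the real constant):
 */
enum hcam_op { HCAM_LOG_DATA, HCAM_CONFIG_CHANGE };

static enum hcam_op hcam_op_for_buffer(int posted_so_far, int num_log)
{
	return posted_so_far < num_log ? HCAM_LOG_DATA : HCAM_CONFIG_CHANGE;
}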
6751/**
6752 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6753 * @supported_dev: supported device struct
6754 * @vpids: vendor product id struct
6755 *
6756 * Return value:
6757 * none
6758 **/
6759static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6760 struct ipr_std_inq_vpids *vpids)
6761{
6762 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6763 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6764 supported_dev->num_records = 1;
6765 supported_dev->data_length =
6766 cpu_to_be16(sizeof(struct ipr_supported_device));
6767 supported_dev->reserved = 0;
6768}
6769
6770/**
6771 * ipr_set_supported_devs - Send Set Supported Devices for a device
6772 * @ipr_cmd: ipr command struct
6773 *
a32c055f 6774 * This function sends a Set Supported Devices command to the adapter.
1da177e4
LT
6775 *
6776 * Return value:
6777 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6778 **/
6779static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6780{
6781 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6782 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6783 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6784 struct ipr_resource_entry *res = ipr_cmd->u.res;
6785
6786 ipr_cmd->job_step = ipr_ioa_reset_done;
6787
6788 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6789 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6790 continue;
6791
6792 ipr_cmd->u.res = res;
3e7ebdfa 6793 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6794
6795 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6796 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6797 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6798
6799 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6800 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6801 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6802 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6803
a32c055f
WB
6804 ipr_init_ioadl(ipr_cmd,
6805 ioa_cfg->vpd_cbs_dma +
6806 offsetof(struct ipr_misc_cbs, supp_dev),
6807 sizeof(struct ipr_supported_device),
6808 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6809
6810 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6811 IPR_SET_SUP_DEVICE_TIMEOUT);
6812
3e7ebdfa
WB
6813 if (!ioa_cfg->sis64)
6814 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 6815 LEAVE;
1da177e4
LT
6816 return IPR_RC_JOB_RETURN;
6817 }
6818
05a6538a 6819 LEAVE;
1da177e4
LT
6820 return IPR_RC_JOB_CONTINUE;
6821}
6822
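/*
 * Illustrative sketch, not driver code: ipr_set_supported_devs() is a
 * resumable job step. It parks its cursor in ipr_cmd->u.res, and
 * list_for_each_entry_continue() resumes the walk one element past
 * that cursor, so each pass issues a single command and returns
 * IPR_RC_JOB_RETURN; only when no more disks remain does it return
 * IPR_RC_JOB_CONTINUE and let the job advance. The same pattern in
 * miniature, with simplified types:
 */
struct list_node { struct list_node *next; };

struct resumable_walk {
	struct list_node *head;   /* sentinel of a circular list */
	struct list_node *cursor; /* last element handled */
};

/* returns 1 if one element was issued (wait for completion), 0 if done */
static int walk_one(struct resumable_walk *w)
{
	struct list_node *next = w->cursor->next;

	if (next == w->head)
		return 0;  /* exhausted: IPR_RC_JOB_CONTINUE */
	w->cursor = next;  /* ... issue the command for this element ... */
	return 1;          /* IPR_RC_JOB_RETURN */
}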
6823/**
6824 * ipr_get_mode_page - Locate specified mode page
6825 * @mode_pages: mode page buffer
6826 * @page_code: page code to find
6827 * @len: minimum required length for mode page
6828 *
6829 * Return value:
6830 * pointer to mode page / NULL on failure
6831 **/
6832static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6833 u32 page_code, u32 len)
6834{
6835 struct ipr_mode_page_hdr *mode_hdr;
6836 u32 page_length;
6837 u32 length;
6838
6839 if (!mode_pages || (mode_pages->hdr.length == 0))
6840 return NULL;
6841
6842 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6843 mode_hdr = (struct ipr_mode_page_hdr *)
6844 (mode_pages->data + mode_pages->hdr.block_desc_len);
6845
6846 while (length) {
6847 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6848 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6849 return mode_hdr;
6850 break;
6851 } else {
6852 page_length = (sizeof(struct ipr_mode_page_hdr) +
6853 mode_hdr->page_length);
6854 length -= page_length;
6855 mode_hdr = (struct ipr_mode_page_hdr *)
6856 ((unsigned long)mode_hdr + page_length);
6857 }
6858 }
6859 return NULL;
6860}
6861
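/*
 * Illustrative sketch, not driver code: a mode sense payload is a
 * header, an optional block descriptor, then back-to-back pages, each
 * carrying its own (code, length) header. ipr_get_mode_page() walks
 * that region the same way as this standalone parser, which assumes
 * the generic two-byte page header:
 */
struct page_hdr {
	unsigned char code;  /* page code in the low 6 bits */
	unsigned char len;   /* bytes of payload after this header */
};

static struct page_hdr *find_page(unsigned char *data, unsigned int total,
				  unsigned char code)
{
	unsigned int off = 0;

	while (off + sizeof(struct page_hdr) <= total) {
		struct page_hdr *hdr = (struct page_hdr *)(data + off);

		if ((hdr->code & 0x3f) == code)
			return hdr;
		off += sizeof(*hdr) + hdr->len;
	}
	return 0; /* NULL: page not present */
}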
6862/**
6863 * ipr_check_term_power - Check for term power errors
6864 * @ioa_cfg: ioa config struct
6865 * @mode_pages: IOAFP mode pages buffer
6866 *
6867 * Check the IOAFP's mode page 28 for term power errors
6868 *
6869 * Return value:
6870 * nothing
6871 **/
6872static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6873 struct ipr_mode_pages *mode_pages)
6874{
6875 int i;
6876 int entry_length;
6877 struct ipr_dev_bus_entry *bus;
6878 struct ipr_mode_page28 *mode_page;
6879
6880 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6881 sizeof(struct ipr_mode_page28));
6882
6883 entry_length = mode_page->entry_length;
6884
6885 bus = mode_page->bus;
6886
6887 for (i = 0; i < mode_page->num_entries; i++) {
6888 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6889 dev_err(&ioa_cfg->pdev->dev,
6890 "Term power is absent on scsi bus %d\n",
6891 bus->res_addr.bus);
6892 }
6893
6894 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6895 }
6896}
6897
6898/**
6899 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6900 * @ioa_cfg: ioa config struct
6901 *
6902 * Looks through the config table for SES devices. If an SES
6903 * device is found in the SES table with a maximum SCSI bus
6904 * speed, that bus's speed is limited accordingly.
6905 *
6906 * Return value:
6907 * none
6908 **/
6909static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6910{
6911 u32 max_xfer_rate;
6912 int i;
6913
6914 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6915 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6916 ioa_cfg->bus_attr[i].bus_width);
6917
6918 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6919 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6920 }
6921}
6922
6923/**
6924 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6925 * @ioa_cfg: ioa config struct
6926 * @mode_pages: mode page 28 buffer
6927 *
6928 * Updates mode page 28 based on driver configuration
6929 *
6930 * Return value:
6931 * none
6932 **/
6933static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 6934 struct ipr_mode_pages *mode_pages)
1da177e4
LT
6935{
6936 int i, entry_length;
6937 struct ipr_dev_bus_entry *bus;
6938 struct ipr_bus_attributes *bus_attr;
6939 struct ipr_mode_page28 *mode_page;
6940
6941 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6942 sizeof(struct ipr_mode_page28));
6943
6944 entry_length = mode_page->entry_length;
6945
6946 /* Loop for each device bus entry */
6947 for (i = 0, bus = mode_page->bus;
6948 i < mode_page->num_entries;
6949 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6950 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6951 dev_err(&ioa_cfg->pdev->dev,
6952 "Invalid resource address reported: 0x%08X\n",
6953 IPR_GET_PHYS_LOC(bus->res_addr));
6954 continue;
6955 }
6956
6957 bus_attr = &ioa_cfg->bus_attr[i];
6958 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6959 bus->bus_width = bus_attr->bus_width;
6960 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6961 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6962 if (bus_attr->qas_enabled)
6963 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6964 else
6965 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6966 }
6967}
6968
6969/**
6970 * ipr_build_mode_select - Build a mode select command
6971 * @ipr_cmd: ipr command struct
6972 * @res_handle: resource handle to send command to
6973 * @parm: Byte 1 of the Mode Select command
6974 * @dma_addr: DMA buffer address
6975 * @xfer_len: data transfer length
6976 *
6977 * Return value:
6978 * none
6979 **/
6980static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
6981 __be32 res_handle, u8 parm,
6982 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6983{
1da177e4
LT
6984 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6985
6986 ioarcb->res_handle = res_handle;
6987 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6988 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6989 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6990 ioarcb->cmd_pkt.cdb[1] = parm;
6991 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6992
a32c055f 6993 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6994}
6995
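/*
 * Illustrative sketch, not driver code: MODE_SELECT above is the
 * 6-byte CDB form, so the layout is fixed by SCSI: opcode 0x15 in
 * byte 0, the PF/SP bits in byte 1 (the driver passes 0x11), and the
 * parameter list length in byte 4, which is why xfer_len is a u8 and
 * the whole page image must fit in 255 bytes:
 */
static void build_mode_select6(unsigned char cdb[6], unsigned char parm,
			       unsigned char xfer_len)
{
	cdb[0] = 0x15;     /* MODE SELECT(6) */
	cdb[1] = parm;     /* e.g. 0x11 = PF | SP */
	cdb[2] = 0;
	cdb[3] = 0;
	cdb[4] = xfer_len; /* parameter list length */
	cdb[5] = 0;        /* control byte */
}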
6996/**
6997 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6998 * @ipr_cmd: ipr command struct
6999 *
7000 * This function sets up the SCSI bus attributes and sends
7001 * a Mode Select for Page 28 to activate them.
7002 *
7003 * Return value:
7004 * IPR_RC_JOB_RETURN
7005 **/
7006static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7007{
7008 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7009 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7010 int length;
7011
7012 ENTER;
4733804c
BK
7013 ipr_scsi_bus_speed_limit(ioa_cfg);
7014 ipr_check_term_power(ioa_cfg, mode_pages);
7015 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7016 length = mode_pages->hdr.length + 1;
7017 mode_pages->hdr.length = 0;
1da177e4
LT
7018
7019 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7020 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7021 length);
7022
f72919ec
WB
7023 ipr_cmd->job_step = ipr_set_supported_devs;
7024 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7025 struct ipr_resource_entry, queue);
1da177e4
LT
7026 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7027
7028 LEAVE;
7029 return IPR_RC_JOB_RETURN;
7030}
7031
7032/**
7033 * ipr_build_mode_sense - Builds a mode sense command
7034 * @ipr_cmd: ipr command struct
7035 * @res_handle: resource handle to send command to
7036 * @parm: Byte 2 of mode sense command
7037 * @dma_addr: DMA address of mode sense buffer
7038 * @xfer_len: Size of DMA buffer
7039 *
7040 * Return value:
7041 * none
7042 **/
7043static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7044 __be32 res_handle,
a32c055f 7045 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7046{
1da177e4
LT
7047 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7048
7049 ioarcb->res_handle = res_handle;
7050 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7051 ioarcb->cmd_pkt.cdb[2] = parm;
7052 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7053 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7054
a32c055f 7055 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7056}
7057
dfed823e
BK
7058/**
7059 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7060 * @ipr_cmd: ipr command struct
7061 *
7062 * This function handles the failure of an IOA bringup command.
7063 *
7064 * Return value:
7065 * IPR_RC_JOB_RETURN
7066 **/
7067static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7068{
7069 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7070 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7071
7072 dev_err(&ioa_cfg->pdev->dev,
7073 "0x%02X failed with IOASC: 0x%08X\n",
7074 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7075
7076 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7077 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e
BK
7078 return IPR_RC_JOB_RETURN;
7079}
7080
7081/**
7082 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7083 * @ipr_cmd: ipr command struct
7084 *
7085 * This function handles the failure of a Mode Sense to the IOAFP.
7086 * Some adapters do not handle all mode pages.
7087 *
7088 * Return value:
7089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7090 **/
7091static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7092{
f72919ec 7093 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7094 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7095
7096 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7097 ipr_cmd->job_step = ipr_set_supported_devs;
7098 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7099 struct ipr_resource_entry, queue);
dfed823e
BK
7100 return IPR_RC_JOB_CONTINUE;
7101 }
7102
7103 return ipr_reset_cmd_failed(ipr_cmd);
7104}
7105
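/*
 * Illustrative sketch, not driver code: before each step runs, the
 * reset job router re-installs ipr_reset_cmd_failed() as the failure
 * handler; a step that can tolerate specific errors overrides
 * job_step_failed beforehand, as the mode sense path does above, and
 * either recovers (JOB_CONTINUE) or falls back to the default
 * escalation. The shape of that dispatch, with simplified types:
 */
struct reset_step {
	int (*run)(void *ctx);
	int (*on_failure)(void *ctx); /* reset to a default before each run */
};

static int handle_completion(struct reset_step *s, void *ctx, int sense_key)
{
	/* failed commands go to the (possibly overridden) failure handler */
	if (sense_key)
		return s->on_failure(ctx);
	return 0; /* success: the router reinits and calls the next step */
}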
1da177e4
LT
7106/**
7107 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7108 * @ipr_cmd: ipr command struct
7109 *
7110 * This function sends a Page 28 mode sense to the IOA to
7111 * retrieve SCSI bus attributes.
7112 *
7113 * Return value:
7114 * IPR_RC_JOB_RETURN
7115 **/
7116static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7117{
7118 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7119
7120 ENTER;
7121 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7122 0x28, ioa_cfg->vpd_cbs_dma +
7123 offsetof(struct ipr_misc_cbs, mode_pages),
7124 sizeof(struct ipr_mode_pages));
7125
7126 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7127 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7128
7129 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7130
7131 LEAVE;
7132 return IPR_RC_JOB_RETURN;
7133}
7134
ac09c349
BK
7135/**
7136 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7137 * @ipr_cmd: ipr command struct
7138 *
7139 * This function enables dual IOA RAID support if possible.
7140 *
7141 * Return value:
7142 * IPR_RC_JOB_RETURN
7143 **/
7144static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7145{
7146 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7147 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7148 struct ipr_mode_page24 *mode_page;
7149 int length;
7150
7151 ENTER;
7152 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7153 sizeof(struct ipr_mode_page24));
7154
7155 if (mode_page)
7156 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7157
7158 length = mode_pages->hdr.length + 1;
7159 mode_pages->hdr.length = 0;
7160
7161 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7162 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7163 length);
7164
7165 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7166 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7167
7168 LEAVE;
7169 return IPR_RC_JOB_RETURN;
7170}
7171
7172/**
7173 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7174 * @ipr_cmd: ipr command struct
7175 *
7176 * This function handles the failure of a Mode Sense to the IOAFP.
7177 * Some adapters do not handle all mode pages.
7178 *
7179 * Return value:
7180 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7181 **/
7182static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7183{
96d21f00 7184 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7185
7186 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7187 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7188 return IPR_RC_JOB_CONTINUE;
7189 }
7190
7191 return ipr_reset_cmd_failed(ipr_cmd);
7192}
7193
7194/**
7195 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7196 * @ipr_cmd: ipr command struct
7197 *
7198 * This function sends a mode sense to the IOA to retrieve
7199 * the IOA Advanced Function Control mode page.
7200 *
7201 * Return value:
7202 * IPR_RC_JOB_RETURN
7203 **/
7204static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7205{
7206 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7207
7208 ENTER;
7209 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7210 0x24, ioa_cfg->vpd_cbs_dma +
7211 offsetof(struct ipr_misc_cbs, mode_pages),
7212 sizeof(struct ipr_mode_pages));
7213
7214 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7215 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7216
7217 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7218
7219 LEAVE;
7220 return IPR_RC_JOB_RETURN;
7221}
7222
1da177e4
LT
7223/**
7224 * ipr_init_res_table - Initialize the resource table
7225 * @ipr_cmd: ipr command struct
7226 *
7227 * This function looks through the existing resource table, comparing
7228 * it with the config table. This function will take care of old/new
7229 * devices and schedule adding/removing them from the mid-layer
7230 * as appropriate.
7231 *
7232 * Return value:
7233 * IPR_RC_JOB_CONTINUE
7234 **/
7235static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7236{
7237 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7238 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7239 struct ipr_config_table_entry_wrapper cfgtew;
7240 int entries, found, flag, i;
1da177e4
LT
7241 LIST_HEAD(old_res);
7242
7243 ENTER;
3e7ebdfa
WB
7244 if (ioa_cfg->sis64)
7245 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7246 else
7247 flag = ioa_cfg->u.cfg_table->hdr.flags;
7248
7249 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
7250 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7251
7252 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7253 list_move_tail(&res->queue, &old_res);
7254
3e7ebdfa 7255 if (ioa_cfg->sis64)
438b0331 7256 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7257 else
7258 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7259
7260 for (i = 0; i < entries; i++) {
7261 if (ioa_cfg->sis64)
7262 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7263 else
7264 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7265 found = 0;
7266
7267 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7268 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7269 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7270 found = 1;
7271 break;
7272 }
7273 }
7274
7275 if (!found) {
7276 if (list_empty(&ioa_cfg->free_res_q)) {
7277 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7278 break;
7279 }
7280
7281 found = 1;
7282 res = list_entry(ioa_cfg->free_res_q.next,
7283 struct ipr_resource_entry, queue);
7284 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7285 ipr_init_res_entry(res, &cfgtew);
1da177e4 7286 res->add_to_ml = 1;
56115598
WB
7287 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7288 res->sdev->allow_restart = 1;
1da177e4
LT
7289
7290 if (found)
3e7ebdfa 7291 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7292 }
7293
7294 list_for_each_entry_safe(res, temp, &old_res, queue) {
7295 if (res->sdev) {
7296 res->del_from_ml = 1;
3e7ebdfa 7297 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7298 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7299 }
7300 }
7301
3e7ebdfa
WB
7302 list_for_each_entry_safe(res, temp, &old_res, queue) {
7303 ipr_clear_res_target(res);
7304 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7305 }
7306
ac09c349
BK
7307 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7308 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7309 else
7310 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7311
7312 LEAVE;
7313 return IPR_RC_JOB_CONTINUE;
7314}
7315
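/*
 * Illustrative sketch, not driver code: the reconciliation above first
 * moves every known resource onto old_res, then re-matches each config
 * table entry back into used_res_q; anything new gets add_to_ml,
 * anything left on old_res with a scsi device gets del_from_ml, and
 * the rest is recycled to the free list. The three-way outcome:
 */
enum res_fate { RES_KEPT, RES_ADDED, RES_REMOVED };

static enum res_fate reconcile(int was_known, int in_new_config_table)
{
	if (in_new_config_table)
		return was_known ? RES_KEPT : RES_ADDED; /* add_to_ml */
	return RES_REMOVED; /* del_from_ml, or straight back to free list */
}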
7316/**
7317 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7318 * @ipr_cmd: ipr command struct
7319 *
7320 * This function sends a Query IOA Configuration command
7321 * to the adapter to retrieve the IOA configuration table.
7322 *
7323 * Return value:
7324 * IPR_RC_JOB_RETURN
7325 **/
7326static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7327{
7328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7329 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7330 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7331 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7332
7333 ENTER;
ac09c349
BK
7334 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7335 ioa_cfg->dual_raid = 1;
1da177e4
LT
7336 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7337 ucode_vpd->major_release, ucode_vpd->card_type,
7338 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7339 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7340 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7341
7342 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7343 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7344 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7345 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7346
3e7ebdfa 7347 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7348 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7349
7350 ipr_cmd->job_step = ipr_init_res_table;
7351
7352 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7353
7354 LEAVE;
7355 return IPR_RC_JOB_RETURN;
7356}
7357
7358/**
7359 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7360 * @ipr_cmd: ipr command struct
7361 *
7362 * This utility function sends an inquiry to the adapter.
7363 *
7364 * Return value:
7365 * none
7366 **/
7367static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7368 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
7369{
7370 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
7371
7372 ENTER;
7373 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7374 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7375
7376 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7377 ioarcb->cmd_pkt.cdb[1] = flags;
7378 ioarcb->cmd_pkt.cdb[2] = page;
7379 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7380
a32c055f 7381 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7382
7383 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7384 LEAVE;
7385}
7386
62275040
BK
7387/**
7388 * ipr_inquiry_page_supported - Is the given inquiry page supported
7389 * @page0: inquiry page 0 buffer
7390 * @page: page code.
7391 *
7392 * This function determines if the specified inquiry page is supported.
7393 *
7394 * Return value:
7395 * 1 if page is supported / 0 if not
7396 **/
7397static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7398{
7399 int i;
7400
7401 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7402 if (page0->page[i] == page)
7403 return 1;
7404
7405 return 0;
7406}
7407
ac09c349
BK
7408/**
7409 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7410 * @ipr_cmd: ipr command struct
7411 *
7412 * This function sends a Page 0xD0 inquiry to the adapter
7413 * to retrieve adapter capabilities.
7414 *
7415 * Return value:
7416 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7417 **/
7418static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7419{
7420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7421 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7422 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7423
7424 ENTER;
7425 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7426 memset(cap, 0, sizeof(*cap));
7427
7428 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7429 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7430 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7431 sizeof(struct ipr_inquiry_cap));
7432 return IPR_RC_JOB_RETURN;
7433 }
7434
7435 LEAVE;
7436 return IPR_RC_JOB_CONTINUE;
7437}
7438
1da177e4
LT
7439/**
7440 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7441 * @ipr_cmd: ipr command struct
7442 *
7443 * This function sends a Page 3 inquiry to the adapter
7444 * to retrieve software VPD information.
7445 *
7446 * Return value:
7447 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7448 **/
7449static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
7450{
7451 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
7452
7453 ENTER;
7454
ac09c349 7455 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
7456
7457 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7458 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7459 sizeof(struct ipr_inquiry_page3));
7460
7461 LEAVE;
7462 return IPR_RC_JOB_RETURN;
7463}
7464
7465/**
7466 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7467 * @ipr_cmd: ipr command struct
7468 *
7469 * This function sends a Page 0 inquiry to the adapter
7470 * to retrieve supported inquiry pages.
7471 *
7472 * Return value:
7473 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7474 **/
7475static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7476{
7477 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7478 char type[5];
7479
7480 ENTER;
7481
7482 /* Grab the type out of the VPD and store it away */
7483 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7484 type[4] = '\0';
7485 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7486
62275040 7487 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7488
62275040
BK
7489 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7490 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7491 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
7492
7493 LEAVE;
7494 return IPR_RC_JOB_RETURN;
7495}
7496
7497/**
7498 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7499 * @ipr_cmd: ipr command struct
7500 *
7501 * This function sends a standard inquiry to the adapter.
7502 *
7503 * Return value:
7504 * IPR_RC_JOB_RETURN
7505 **/
7506static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7507{
7508 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7509
7510 ENTER;
62275040 7511 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
7512
7513 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7514 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7515 sizeof(struct ipr_ioa_vpd));
7516
7517 LEAVE;
7518 return IPR_RC_JOB_RETURN;
7519}
7520
7521/**
214777ba 7522 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
7523 * @ipr_cmd: ipr command struct
7524 *
7525 * This function sends an Identify Host Request Response Queue
7526 * command to establish the HRRQ with the adapter.
7527 *
7528 * Return value:
7529 * IPR_RC_JOB_RETURN
7530 **/
214777ba 7531static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7532{
7533 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7534 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7535 struct ipr_hrr_queue *hrrq;
1da177e4
LT
7536
7537 ENTER;
05a6538a 7538 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
1da177e4
LT
7539 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7540
56d6aa33 7541 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7542 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 7543
05a6538a 7544 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7545 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7546
05a6538a 7547 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7548 if (ioa_cfg->sis64)
7549 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7550
05a6538a 7551 if (ioa_cfg->nvectors == 1)
7552 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7553 else
7554 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7555
7556 ioarcb->cmd_pkt.cdb[2] =
7557 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7558 ioarcb->cmd_pkt.cdb[3] =
7559 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7560 ioarcb->cmd_pkt.cdb[4] =
7561 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7562 ioarcb->cmd_pkt.cdb[5] =
7563 ((u64) hrrq->host_rrq_dma) & 0xff;
7564 ioarcb->cmd_pkt.cdb[7] =
7565 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7566 ioarcb->cmd_pkt.cdb[8] =
7567 (sizeof(u32) * hrrq->size) & 0xff;
7568
7569 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7570 ioarcb->cmd_pkt.cdb[9] =
7571 ioa_cfg->identify_hrrq_index;
1da177e4 7572
05a6538a 7573 if (ioa_cfg->sis64) {
7574 ioarcb->cmd_pkt.cdb[10] =
7575 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7576 ioarcb->cmd_pkt.cdb[11] =
7577 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7578 ioarcb->cmd_pkt.cdb[12] =
7579 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7580 ioarcb->cmd_pkt.cdb[13] =
7581 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7582 }
7583
7584 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7585 ioarcb->cmd_pkt.cdb[14] =
7586 ioa_cfg->identify_hrrq_index;
05a6538a 7587
7588 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7589 IPR_INTERNAL_TIMEOUT);
7590
56d6aa33 7591 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7592 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 7593
7594 LEAVE;
7595 return IPR_RC_JOB_RETURN;
05a6538a 7596 }
7597
1da177e4 7598 LEAVE;
05a6538a 7599 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
7600}
7601
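/*
 * Illustrative sketch, not driver code: the Identify Host RRQ CDB
 * built above scatters a 64-bit DMA address big-endian across
 * non-adjacent CDB bytes: bits 31..0 land in bytes 2..5 and, for
 * SIS64 adapters, bits 63..32 land in bytes 10..13. The same packing
 * in isolation:
 */
static void pack_hrrq_dma(unsigned char *cdb, unsigned long long dma,
			  int sis64)
{
	cdb[2] = (dma >> 24) & 0xff;
	cdb[3] = (dma >> 16) & 0xff;
	cdb[4] = (dma >> 8) & 0xff;
	cdb[5] = dma & 0xff;
	if (sis64) {                    /* upper word, same byte order */
		cdb[10] = (dma >> 56) & 0xff;
		cdb[11] = (dma >> 48) & 0xff;
		cdb[12] = (dma >> 40) & 0xff;
		cdb[13] = (dma >> 32) & 0xff;
	}
}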
7602/**
7603 * ipr_reset_timer_done - Adapter reset timer function
7604 * @ipr_cmd: ipr command struct
7605 *
7606 * Description: This function is used in adapter reset processing
7607 * for timing events. If the reset_cmd pointer in the IOA
7608 * config struct is not this adapter's, we are doing nested
7609 * resets and fail_all_ops will take care of freeing the
7610 * command block.
7611 *
7612 * Return value:
7613 * none
7614 **/
7615static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7616{
7617 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7618 unsigned long lock_flags = 0;
7619
7620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7621
7622 if (ioa_cfg->reset_cmd == ipr_cmd) {
7623 list_del(&ipr_cmd->queue);
7624 ipr_cmd->done(ipr_cmd);
7625 }
7626
7627 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7628}
7629
7630/**
7631 * ipr_reset_start_timer - Start a timer for adapter reset job
7632 * @ipr_cmd: ipr command struct
7633 * @timeout: timeout value
7634 *
7635 * Description: This function is used in adapter reset processing
7636 * for timing events. If the reset_cmd pointer in the IOA
7637 * config struct is not this adapter's, we are doing nested
7638 * resets and fail_all_ops will take care of freeing the
7639 * command block.
7640 *
7641 * Return value:
7642 * none
7643 **/
7644static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7645 unsigned long timeout)
7646{
05a6538a 7647
7648 ENTER;
7649 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7650 ipr_cmd->done = ipr_reset_ioa_job;
7651
7652 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7653 ipr_cmd->timer.expires = jiffies + timeout;
7654 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7655 add_timer(&ipr_cmd->timer);
7656}
7657
7658/**
7659 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7660 * @ioa_cfg: ioa cfg struct
7661 *
7662 * Return value:
7663 * nothing
7664 **/
7665static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7666{
05a6538a 7667 struct ipr_hrr_queue *hrrq;
1da177e4 7668
05a6538a 7669 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 7670 spin_lock(&hrrq->_lock);
05a6538a 7671 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7672
7673 /* Initialize Host RRQ pointers */
7674 hrrq->hrrq_start = hrrq->host_rrq;
7675 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7676 hrrq->hrrq_curr = hrrq->hrrq_start;
7677 hrrq->toggle_bit = 1;
56d6aa33 7678 spin_unlock(&hrrq->_lock);
05a6538a 7679 }
56d6aa33 7680 wmb();
05a6538a 7681
56d6aa33 7682 ioa_cfg->identify_hrrq_index = 0;
7683 if (ioa_cfg->hrrq_num == 1)
7684 atomic_set(&ioa_cfg->hrrq_index, 0);
7685 else
7686 atomic_set(&ioa_cfg->hrrq_index, 1);
1da177e4
LT
7687
7688 /* Zero out config table */
3e7ebdfa 7689 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7690}
7691
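/*
 * Illustrative sketch, not driver code: each host RRQ is a ring of
 * 32-bit response descriptors. The init above points hrrq_curr at the
 * start and sets toggle_bit to 1; the adapter is assumed to stamp each
 * entry's low bit with the current toggle value, and the expected
 * value flips whenever the consumer wraps, which lets the host tell
 * fresh entries from stale ones without a producer index:
 */
struct rrq {
	unsigned int *curr, *start, *end; /* end = last slot, inclusive */
	unsigned int toggle;              /* starts at 1 */
};

/* returns 1 and stores the entry if a fresh descriptor is available */
static int rrq_pop(struct rrq *q, unsigned int *entry)
{
	if ((*q->curr & 1u) != q->toggle)
		return 0;
	*entry = *q->curr;
	if (q->curr++ == q->end) { /* wrapped: flip the expected toggle */
		q->curr = q->start;
		q->toggle ^= 1u;
	}
	return 1;
}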
214777ba
WB
7692/**
7693 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7694 * @ipr_cmd: ipr command struct
7695 *
7696 * Return value:
7697 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7698 **/
7699static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7700{
7701 unsigned long stage, stage_time;
7702 u32 feedback;
7703 volatile u32 int_reg;
7704 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7705 u64 maskval = 0;
7706
7707 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7708 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7709 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7710
7711 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7712
7713 /* sanity check the stage_time value */
438b0331
WB
7714 if (stage_time == 0)
7715 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7716 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
7717 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7718 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7719 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7720
7721 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7722 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7723 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7724 stage_time = ioa_cfg->transop_timeout;
7725 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7726 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
1df79ca4
WB
7727 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7728 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7729 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7730 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7731 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7732 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7733 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7734 return IPR_RC_JOB_CONTINUE;
7735 }
214777ba
WB
7736 }
7737
7738 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7739 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7740 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7741 ipr_cmd->done = ipr_reset_ioa_job;
7742 add_timer(&ipr_cmd->timer);
05a6538a 7743
7744 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
214777ba
WB
7745
7746 return IPR_RC_JOB_RETURN;
7747}
7748
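/*
 * Illustrative sketch, not driver code: the init feedback register
 * multiplexes the current IPL stage and that stage's time allowance
 * into a single 32-bit read, split by the two masks used above. A
 * standalone decode with placeholder mask and default values (the
 * real ones are defined in ipr.h):
 */
#define STAGE_MASK         0xff000000u /* placeholder split */
#define STAGE_TIME_MASK    0x0000ffffu
#define DEFAULT_STAGE_TIME 30u         /* placeholder, in seconds */

static void decode_init_feedback(unsigned int fb, unsigned int *stage,
				 unsigned int *stage_time)
{
	*stage = fb & STAGE_MASK;
	*stage_time = fb & STAGE_TIME_MASK;
	if (*stage_time == 0) /* sanity-clamp, as the driver does */
		*stage_time = DEFAULT_STAGE_TIME;
}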
1da177e4
LT
7749/**
7750 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7751 * @ipr_cmd: ipr command struct
7752 *
7753 * This function reinitializes some control blocks and
7754 * enables destructive diagnostics on the adapter.
7755 *
7756 * Return value:
7757 * IPR_RC_JOB_RETURN
7758 **/
7759static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7760{
7761 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7762 volatile u32 int_reg;
7be96900 7763 volatile u64 maskval;
56d6aa33 7764 int i;
1da177e4
LT
7765
7766 ENTER;
214777ba 7767 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7768 ipr_init_ioa_mem(ioa_cfg);
7769
56d6aa33 7770 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7771 spin_lock(&ioa_cfg->hrrq[i]._lock);
7772 ioa_cfg->hrrq[i].allow_interrupts = 1;
7773 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7774 }
7775 wmb();
8701f185
WB
7776 if (ioa_cfg->sis64) {
7777 /* Set the adapter to the correct endian mode. */
7778 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7779 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7780 }
7781
7be96900 7782 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
7783
7784 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7785 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 7786 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
7787 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7788 return IPR_RC_JOB_CONTINUE;
7789 }
7790
7791 /* Enable destructive diagnostics on IOA */
214777ba
WB
7792 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7793
7be96900
WB
7794 if (ioa_cfg->sis64) {
7795 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7796 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7797 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7798 } else
7799 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 7800
1da177e4
LT
7801 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7802
7803 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7804
214777ba
WB
7805 if (ioa_cfg->sis64) {
7806 ipr_cmd->job_step = ipr_reset_next_stage;
7807 return IPR_RC_JOB_CONTINUE;
7808 }
7809
1da177e4 7810 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 7811 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
7812 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7813 ipr_cmd->done = ipr_reset_ioa_job;
7814 add_timer(&ipr_cmd->timer);
05a6538a 7815 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7816
7817 LEAVE;
7818 return IPR_RC_JOB_RETURN;
7819}
7820
7821/**
7822 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7823 * @ipr_cmd: ipr command struct
7824 *
7825 * This function is invoked when an adapter dump has run out
7826 * of processing time.
7827 *
7828 * Return value:
7829 * IPR_RC_JOB_CONTINUE
7830 **/
7831static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7832{
7833 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7834
7835 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696
BK
7836 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7837 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4
LT
7838 ioa_cfg->sdt_state = ABORT_DUMP;
7839
4c647e90 7840 ioa_cfg->dump_timeout = 1;
1da177e4
LT
7841 ipr_cmd->job_step = ipr_reset_alert;
7842
7843 return IPR_RC_JOB_CONTINUE;
7844}
7845
7846/**
7847 * ipr_unit_check_no_data - Log a unit check/no data error log
7848 * @ioa_cfg: ioa config struct
7849 *
7850 * Logs an error indicating the adapter unit checked, but for some
7851 * reason, we were unable to fetch the unit check buffer.
7852 *
7853 * Return value:
7854 * nothing
7855 **/
7856static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7857{
7858 ioa_cfg->errors_logged++;
7859 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7860}
7861
7862/**
7863 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7864 * @ioa_cfg: ioa config struct
7865 *
7866 * Fetches the unit check buffer from the adapter by clocking the data
7867 * through the mailbox register.
7868 *
7869 * Return value:
7870 * nothing
7871 **/
7872static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7873{
7874 unsigned long mailbox;
7875 struct ipr_hostrcb *hostrcb;
7876 struct ipr_uc_sdt sdt;
7877 int rc, length;
65f56475 7878 u32 ioasc;
1da177e4
LT
7879
7880 mailbox = readl(ioa_cfg->ioa_mailbox);
7881
dcbad00e 7882 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
7883 ipr_unit_check_no_data(ioa_cfg);
7884 return;
7885 }
7886
7887 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7888 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7889 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7890
dcbad00e
WB
7891 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7892 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7893 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
7894 ipr_unit_check_no_data(ioa_cfg);
7895 return;
7896 }
7897
7898 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
7899 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7900 length = be32_to_cpu(sdt.entry[0].end_token);
7901 else
7902 length = (be32_to_cpu(sdt.entry[0].end_token) -
7903 be32_to_cpu(sdt.entry[0].start_token)) &
7904 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
7905
7906 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7907 struct ipr_hostrcb, queue);
7908 list_del(&hostrcb->queue);
7909 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7910
7911 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 7912 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
7913 (__be32 *)&hostrcb->hcam,
7914 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7915
65f56475 7916 if (!rc) {
1da177e4 7917 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 7918 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
7919 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7920 ioa_cfg->sdt_state == GET_DUMP)
7921 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7922 } else
1da177e4
LT
7923 ipr_unit_check_no_data(ioa_cfg);
7924
7925 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7926}
7927
110def85
WB
7928/**
7929 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7930 * @ipr_cmd: ipr command struct
7931 *
7932 * Description: This function retrieves the unit check buffer.
7933 *
7934 * Return value:
7935 * IPR_RC_JOB_RETURN
7936 **/
7937static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7938{
7939 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7940
7941 ENTER;
7942 ioa_cfg->ioa_unit_checked = 0;
7943 ipr_get_unit_check_buffer(ioa_cfg);
7944 ipr_cmd->job_step = ipr_reset_alert;
7945 ipr_reset_start_timer(ipr_cmd, 0);
7946
7947 LEAVE;
7948 return IPR_RC_JOB_RETURN;
7949}
7950
1da177e4
LT
7951/**
7952 * ipr_reset_restore_cfg_space - Restore PCI config space.
7953 * @ipr_cmd: ipr command struct
7954 *
7955 * Description: This function restores the saved PCI config space of
7956 * the adapter, fails all outstanding ops back to the callers, and
7957 * fetches the dump/unit check if applicable to this reset.
7958 *
7959 * Return value:
7960 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7961 **/
7962static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7963{
7964 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 7965 u32 int_reg;
1da177e4
LT
7966
7967 ENTER;
99c965dd 7968 ioa_cfg->pdev->state_saved = true;
1d3c16a8 7969 pci_restore_state(ioa_cfg->pdev);
1da177e4
LT
7970
7971 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 7972 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
7973 return IPR_RC_JOB_CONTINUE;
7974 }
7975
7976 ipr_fail_all_ops(ioa_cfg);
7977
8701f185
WB
7978 if (ioa_cfg->sis64) {
7979 /* Set the adapter to the correct endian mode. */
7980 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7981 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7982 }
7983
1da177e4 7984 if (ioa_cfg->ioa_unit_checked) {
110def85
WB
7985 if (ioa_cfg->sis64) {
7986 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7987 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7988 return IPR_RC_JOB_RETURN;
7989 } else {
7990 ioa_cfg->ioa_unit_checked = 0;
7991 ipr_get_unit_check_buffer(ioa_cfg);
7992 ipr_cmd->job_step = ipr_reset_alert;
7993 ipr_reset_start_timer(ipr_cmd, 0);
7994 return IPR_RC_JOB_RETURN;
7995 }
1da177e4
LT
7996 }
7997
7998 if (ioa_cfg->in_ioa_bringdown) {
7999 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8000 } else {
8001 ipr_cmd->job_step = ipr_reset_enable_ioa;
8002
8003 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 8004 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 8005 ioa_cfg->dump_timeout = 0;
4d4dd706
KSS
8006 if (ioa_cfg->sis64)
8007 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8008 else
8009 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
1da177e4
LT
8010 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8011 schedule_work(&ioa_cfg->work_q);
8012 return IPR_RC_JOB_RETURN;
8013 }
8014 }
8015
438b0331 8016 LEAVE;
1da177e4
LT
8017 return IPR_RC_JOB_CONTINUE;
8018}
8019
e619e1a7
BK
8020/**
8021 * ipr_reset_bist_done - BIST has completed on the adapter.
8022 * @ipr_cmd: ipr command struct
8023 *
8024 * Description: Unblock config space and resume the reset process.
8025 *
8026 * Return value:
8027 * IPR_RC_JOB_CONTINUE
8028 **/
8029static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8030{
fb51ccbf
JK
8031 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8032
e619e1a7 8033 ENTER;
fb51ccbf
JK
8034 if (ioa_cfg->cfg_locked)
8035 pci_cfg_access_unlock(ioa_cfg->pdev);
8036 ioa_cfg->cfg_locked = 0;
e619e1a7
BK
8037 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8038 LEAVE;
8039 return IPR_RC_JOB_CONTINUE;
8040}
8041
1da177e4
LT
8042/**
8043 * ipr_reset_start_bist - Run BIST on the adapter.
8044 * @ipr_cmd: ipr command struct
8045 *
8046 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8047 *
8048 * Return value:
8049 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8050 **/
8051static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8052{
8053 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8054 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
8055
8056 ENTER;
cb237ef7
WB
8057 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8058 writel(IPR_UPROCI_SIS64_START_BIST,
8059 ioa_cfg->regs.set_uproc_interrupt_reg32);
8060 else
8061 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8062
8063 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8064 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
8065 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8066 rc = IPR_RC_JOB_RETURN;
cb237ef7 8067 } else {
fb51ccbf
JK
8068 if (ioa_cfg->cfg_locked)
8069 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8070 ioa_cfg->cfg_locked = 0;
cb237ef7
WB
8071 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8072 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
8073 }
8074
8075 LEAVE;
8076 return rc;
8077}
8078
463fc696
BK
8079/**
8080 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8081 * @ipr_cmd: ipr command struct
8082 *
8083 * Description: This clears PCI reset to the adapter and delays two seconds.
8084 *
8085 * Return value:
8086 * IPR_RC_JOB_RETURN
8087 **/
8088static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8089{
8090 ENTER;
8091 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8092 ipr_cmd->job_step = ipr_reset_bist_done;
8093 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8094 LEAVE;
8095 return IPR_RC_JOB_RETURN;
8096}
8097
8098/**
8099 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8100 * @ipr_cmd: ipr command struct
8101 *
8102 * Description: This asserts PCI reset to the adapter.
8103 *
8104 * Return value:
8105 * IPR_RC_JOB_RETURN
8106 **/
8107static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8108{
8109 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8110 struct pci_dev *pdev = ioa_cfg->pdev;
8111
8112 ENTER;
463fc696
BK
8113 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8114 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8115 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8116 LEAVE;
8117 return IPR_RC_JOB_RETURN;
8118}
8119
fb51ccbf
JK
8120/**
8121 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8122 * @ipr_cmd: ipr command struct
8123 *
8124 * Description: This attempts to block config access to the IOA.
8125 *
8126 * Return value:
8127 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8128 **/
8129static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8130{
8131 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8132 int rc = IPR_RC_JOB_CONTINUE;
8133
8134 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8135 ioa_cfg->cfg_locked = 1;
8136 ipr_cmd->job_step = ioa_cfg->reset;
8137 } else {
8138 if (ipr_cmd->u.time_left) {
8139 rc = IPR_RC_JOB_RETURN;
8140 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8141 ipr_reset_start_timer(ipr_cmd,
8142 IPR_CHECK_FOR_RESET_TIMEOUT);
8143 } else {
8144 ipr_cmd->job_step = ioa_cfg->reset;
8145 dev_err(&ioa_cfg->pdev->dev,
8146 "Timed out waiting to lock config access. Resetting anyway.\n");
8147 }
8148 }
8149
8150 return rc;
8151}
8152
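/*
 * Illustrative sketch, not driver code: the wait step above is a
 * polling loop built out of job steps: each failed trylock deducts
 * one IPR_CHECK_FOR_RESET_TIMEOUT tick from a time_left budget and
 * re-arms a timer that re-enters the same step; once the budget is
 * spent the reset proceeds without the lock. The control shape:
 */
/* returns 1 to advance to the next step, 0 to retry after a timer tick */
static int try_lock_with_budget(int (*trylock)(void),
				unsigned int *time_left, unsigned int tick)
{
	if (trylock())
		return 1;          /* got the lock: run the reset */
	if (*time_left) {
		*time_left -= tick;
		return 0;          /* re-arm the step timer and retry */
	}
	return 1;                  /* budget spent: reset anyway */
}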
8153/**
8154 * ipr_reset_block_config_access - Block config access to the IOA
8155 * @ipr_cmd: ipr command struct
8156 *
8157 * Description: This attempts to block config access to the IOA
8158 *
8159 * Return value:
8160 * IPR_RC_JOB_CONTINUE
8161 **/
8162static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8163{
8164 ipr_cmd->ioa_cfg->cfg_locked = 0;
8165 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8166 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8167 return IPR_RC_JOB_CONTINUE;
8168}
8169
1da177e4
LT
8170/**
8171 * ipr_reset_allowed - Query whether or not IOA can be reset
8172 * @ioa_cfg: ioa config struct
8173 *
8174 * Return value:
8175 * 0 if reset not allowed / non-zero if reset is allowed
8176 **/
8177static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8178{
8179 volatile u32 temp_reg;
8180
8181 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8182 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8183}
8184
8185/**
8186 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8187 * @ipr_cmd: ipr command struct
8188 *
8189 * Description: This function waits for adapter permission to run BIST,
8190 * then runs BIST. If the adapter does not give permission after a
8191 * reasonable time, we will reset the adapter anyway. The impact of
8192 * resetting the adapter without warning the adapter is the risk of
8193 * losing the persistent error log on the adapter. If the adapter is
8194 * reset while it is writing to the flash on the adapter, the flash
8195 * segment will have bad ECC and be zeroed.
8196 *
8197 * Return value:
8198 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8199 **/
8200static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8201{
8202 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8203 int rc = IPR_RC_JOB_RETURN;
8204
8205 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8206 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8207 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8208 } else {
fb51ccbf 8209 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8210 rc = IPR_RC_JOB_CONTINUE;
8211 }
8212
8213 return rc;
8214}
8215
8216/**
8701f185 8217 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8218 * @ipr_cmd: ipr command struct
8219 *
8220 * Description: This function alerts the adapter that it will be reset.
8221 * If memory space is not currently enabled, proceed directly
8222 * to running BIST on the adapter. The timer must always be started
8223 * so we guarantee we do not run BIST from ipr_isr.
8224 *
8225 * Return value:
8226 * IPR_RC_JOB_RETURN
8227 **/
8228static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8229{
8230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8231 u16 cmd_reg;
8232 int rc;
8233
8234 ENTER;
8235 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8236
8237 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8238 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8239 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8240 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8241 } else {
fb51ccbf 8242 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8243 }
8244
8245 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8246 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8247
8248 LEAVE;
8249 return IPR_RC_JOB_RETURN;
8250}
8251
8252/**
8253 * ipr_reset_ucode_download_done - Microcode download completion
8254 * @ipr_cmd: ipr command struct
8255 *
8256 * Description: This function unmaps the microcode download buffer.
8257 *
8258 * Return value:
8259 * IPR_RC_JOB_CONTINUE
8260 **/
8261static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8262{
8263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8264 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8265
8266 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8267 sglist->num_sg, DMA_TO_DEVICE);
8268
8269 ipr_cmd->job_step = ipr_reset_alert;
8270 return IPR_RC_JOB_CONTINUE;
8271}
8272
8273/**
8274 * ipr_reset_ucode_download - Download microcode to the adapter
8275 * @ipr_cmd: ipr command struct
8276 *
8277 * Description: This function checks to see if there is microcode
8278 * to download to the adapter. If there is, a download is performed.
8279 *
8280 * Return value:
8281 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8282 **/
8283static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8284{
8285 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8286 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8287
8288 ENTER;
8289 ipr_cmd->job_step = ipr_reset_alert;
8290
8291 if (!sglist)
8292 return IPR_RC_JOB_CONTINUE;
8293
8294 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8295 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8296 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8297 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8298 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8299 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8300 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8301
a32c055f
WB
8302 if (ioa_cfg->sis64)
8303 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8304 else
8305 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
8306 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8307
8308 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8309 IPR_WRITE_BUFFER_TIMEOUT);
8310
8311 LEAVE;
8312 return IPR_RC_JOB_RETURN;
8313}
8314
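/*
 * Illustrative sketch, not driver code: WRITE BUFFER carries the
 * microcode image length as a 24-bit big-endian field in CDB bytes
 * 6..8, which is exactly the mask-and-shift sequence above:
 */
static void pack_write_buffer_len(unsigned char *cdb, unsigned int len)
{
	cdb[6] = (len & 0xff0000) >> 16; /* bits 23..16 */
	cdb[7] = (len & 0x00ff00) >> 8;  /* bits 15..8  */
	cdb[8] = len & 0x0000ff;         /* bits 7..0   */
}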
8315/**
8316 * ipr_reset_shutdown_ioa - Shutdown the adapter
8317 * @ipr_cmd: ipr command struct
8318 *
8319 * Description: This function issues an adapter shutdown of the
8320 * specified type to the specified adapter as part of the
8321 * adapter reset job.
8322 *
8323 * Return value:
8324 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8325 **/
8326static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8327{
8328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8329 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8330 unsigned long timeout;
8331 int rc = IPR_RC_JOB_CONTINUE;
8332
8333 ENTER;
56d6aa33 8334 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8335 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8336 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8337 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8338 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8339 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8340
ac09c349
BK
8341 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8342 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
8343 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8344 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
8345 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8346 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8347 else
ac09c349 8348 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
8349
8350 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8351
8352 rc = IPR_RC_JOB_RETURN;
8353 ipr_cmd->job_step = ipr_reset_ucode_download;
8354 } else
8355 ipr_cmd->job_step = ipr_reset_alert;
8356
8357 LEAVE;
8358 return rc;
8359}
8360
8361/**
8362 * ipr_reset_ioa_job - Adapter reset job
8363 * @ipr_cmd: ipr command struct
8364 *
8365 * Description: This function is the job router for the adapter reset job.
8366 *
8367 * Return value:
8368 * none
8369 **/
8370static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8371{
8372 u32 rc, ioasc;
1da177e4
LT
8373 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8374
8375 do {
96d21f00 8376 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
8377
8378 if (ioa_cfg->reset_cmd != ipr_cmd) {
8379 /*
8380 * We are doing nested adapter resets and this is
8381 * not the current reset job.
8382 */
05a6538a 8383 list_add_tail(&ipr_cmd->queue,
8384 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
8385 return;
8386 }
8387
8388 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
8389 rc = ipr_cmd->job_step_failed(ipr_cmd);
8390 if (rc == IPR_RC_JOB_RETURN)
8391 return;
1da177e4
LT
8392 }
8393
8394 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8395 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8396 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8397 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
8398}
8399
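/*
 * Illustrative sketch, not driver code: ipr_reset_ioa_job() is the
 * engine for the whole reset sequence. Each job step either returns
 * IPR_RC_JOB_CONTINUE, in which case the loop above immediately runs
 * the next step it installed, or IPR_RC_JOB_RETURN after arming a
 * command or timer whose completion re-enters the router. Reduced to
 * its state-machine core:
 */
enum { RC_CONTINUE, RC_RETURN };

struct job { int (*step)(struct job *); /* steps repoint this */ };

static void run_job(struct job *j)
{
	while (j->step(j) == RC_CONTINUE)
		; /* on RC_RETURN, an async completion calls run_job() again */
}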
8400/**
8401 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8402 * @ioa_cfg: ioa config struct
8403 * @job_step: first job step of reset job
8404 * @shutdown_type: shutdown type
8405 *
8406 * Description: This function will initiate the reset of the given adapter
8407 * starting at the selected job step.
8408 * If the caller needs to wait on the completion of the reset,
8409 * the caller must sleep on the reset_wait_q.
8410 *
8411 * Return value:
8412 * none
8413 **/
8414static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8415 int (*job_step) (struct ipr_cmnd *),
8416 enum ipr_shutdown_type shutdown_type)
8417{
8418 struct ipr_cmnd *ipr_cmd;
56d6aa33 8419 int i;
1da177e4
LT
8420
8421 ioa_cfg->in_reset_reload = 1;
56d6aa33 8422 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8423 spin_lock(&ioa_cfg->hrrq[i]._lock);
8424 ioa_cfg->hrrq[i].allow_cmds = 0;
8425 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8426 }
8427 wmb();
1da177e4
LT
8428 scsi_block_requests(ioa_cfg->host);
8429
8430 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8431 ioa_cfg->reset_cmd = ipr_cmd;
8432 ipr_cmd->job_step = job_step;
8433 ipr_cmd->u.shutdown_type = shutdown_type;
8434
8435 ipr_reset_ioa_job(ipr_cmd);
8436}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
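
/*
 * A minimal caller sketch, mirroring the pattern used by __ipr_remove() and
 * ipr_shutdown() later in this file: initiate the reset with the host lock
 * held, then sleep on reset_wait_q as the comment above requires.
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */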

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->needs_warm_reset)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
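
/*
 * Informational sketch of how the PCI error recovery core is expected to
 * drive the handlers above: on an uncorrectable error it freezes the slot
 * and calls ->error_detected(); because we answer PCI_ERS_RESULT_NEED_RESET,
 * it resets the slot and then calls ->slot_reset(), which restarts the
 * adapter reset job:
 *
 *	ipr_pci_error_detected(pdev, pci_channel_io_frozen)
 *		-> ipr_pci_frozen(): queue ipr_reset_freeze, hold off I/O
 *	ipr_pci_slot_reset(pdev)
 *		-> warm reset, or resume at ipr_reset_restore_cfg_space
 *	normal operation resumes once the reset job completes
 */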

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	/* The command lists may not exist yet if we are called on the
	   allocation failure path, so check before dereferencing. */
	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				pci_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);

	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		int i;
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
				 &ioa_cfg->hrrq[i]);
	} else
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
	}

	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
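
/*
 * Worked example of the partitioning above, using purely illustrative
 * numbers rather than the driver's real constants: assume
 * IPR_NUM_INTERNAL_CMD_BLKS = 5, IPR_NUM_BASE_CMD_BLKS = 100 (so
 * IPR_NUM_CMD_BLKS = 105) and hrrq_num = 4.
 *
 *	hrrq[0]: internal commands only, ids 0..4   (size 5)
 *	entries_each_hrrq = 100 / (4 - 1) = 33
 *	hrrq[1]: ids 5..37   hrrq[2]: ids 38..70   hrrq[3]: ids 71..103
 *
 * Integer division leaves 105 - 103 - 1 = 1 block unassigned, so the
 * fix-up after the loop widens the last queue to ids 71..104 (size 34).
 */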

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);

		if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
			|| !ioa_cfg->vset_ids)
			goto out_free_res_entries;
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i >= 0)	/* unwind every ring allocated so far */
				pci_free_consistent(pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		pci_free_consistent(pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	kfree(ioa_cfg->target_ids);
	kfree(ioa_cfg->array_ids);
	kfree(ioa_cfg->vset_ids);
	goto out;
}
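
/*
 * A note on the lock setup above, informational only: hrrq[0].lock aliases
 * the SCSI host lock so the legacy single-queue paths and the error/reset
 * code keep their existing serialization, while every additional queue gets
 * a private lock. Per-queue completion paths can then serialize on just
 * their own queue, e.g.
 *
 *	spin_lock_irqsave(hrrq->lock, flags);
 *	... reap this queue's completions ...
 *	spin_unlock_irqrestore(hrrq->lock, flags);
 *
 * instead of funnelling every CPU through one host-wide lock.
 */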

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
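
/*
 * Informational sketch of what the offset table above buys us: each chip
 * generation publishes its register layout as offsets from the mapped BAR,
 * so the rest of the driver can touch registers through ioa_cfg->regs
 * without caring which chip is underneath, e.g.
 *
 *	writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
 *	readl(ioa_cfg->regs.sense_interrupt_reg);	(flush the posted write)
 */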

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, err, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msix(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = entries[i].vector;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
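
/*
 * Note on the retry loop above: in this kernel generation pci_enable_msix()
 * returns 0 on success, a negative errno on failure, and a positive count
 * when fewer vectors are available than requested. Feeding that count back
 * in as the new request size is how the driver degrades gracefully; e.g. a
 * request for 16 vectors on hardware exposing 8 loops once and then
 * succeeds with vectors == 8. pci_enable_msi_block() below follows the
 * same convention.
 */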

static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, err, vectors;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msi(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}

static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	/* snprintf() bounds and NUL-terminates each description itself */
	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++)
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
}

static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
9222
95fecd90
WB
9223/**
9224 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9225 * @pdev: PCI device struct
9226 *
9227 * Description: Simply set the msi_received flag to 1 indicating that
9228 * Message Signaled Interrupts are supported.
9229 *
9230 * Return value:
9231 * 0 on success / non-zero on failure
9232 **/
6f039790 9233static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
9234{
9235 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9236 unsigned long lock_flags = 0;
9237 irqreturn_t rc = IRQ_HANDLED;
9238
05a6538a 9239 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
9240 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9241
9242 ioa_cfg->msi_received = 1;
9243 wake_up(&ioa_cfg->msi_wait_q);
9244
9245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9246 return rc;
9247}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() can not always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		} else if (rc)
			goto out_msi_disable;
		else {
			if (ioa_cfg->intr_flag == IPR_USE_MSI)
				dev_info(&pdev->dev,
					"Request for %d MSIs succeeded with starting IRQ: %d\n",
					ioa_cfg->nvectors, pdev->irq);
			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
				dev_info(&pdev->dev,
					"Request for %d MSIXs succeeded.\n",
					ioa_cfg->nvectors);
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSI
			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
			0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
			 IRQF_SHARED,
			 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
9883
9884/**
9885 * ipr_halt - Issue shutdown prepare to all adapters
9886 *
9887 * Return value:
9888 * NOTIFY_OK on success / NOTIFY_DONE on failure
9889 **/
9890static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9891{
9892 struct ipr_cmnd *ipr_cmd;
9893 struct ipr_ioa_cfg *ioa_cfg;
9894 unsigned long flags = 0;
9895
9896 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9897 return NOTIFY_DONE;
9898
9899 spin_lock(&ipr_driver_lock);
9900
9901 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9902 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
56d6aa33 9903 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
f72919ec
WB
9904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9905 continue;
9906 }
9907
9908 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9909 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9910 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9911 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9912 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9913
9914 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9915 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9916 }
9917 spin_unlock(&ipr_driver_lock);
9918
9919 return NOTIFY_OK;
9920}
9921
9922static struct notifier_block ipr_notifier = {
9923 ipr_halt, NULL, 0
9924};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);