/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
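/*
 * Entry layout, as suggested by the initializers below: mailbox offset,
 * maximum outstanding commands, cache line size, clear_isr behavior and
 * irq-poll weight, followed by a positional initializer block holding
 * the chip's interrupt register offsets.
 */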
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

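/*
 * Each entry below gives a PCI vendor/device ID, the interrupt type
 * (legacy vs. MSI), the SIS type (32 vs. 64 bit), the method used to
 * start BIST during reset (PCI config space vs. MMIO), and a pointer
 * to the matching register layout above.
 */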
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
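
/*
 * Example module load (illustrative values only; any of the parameters
 * described above may be combined):
 *
 *	modprobe ipr number_of_msix=2 log_level=2 dual_ioa_raid=1
 */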

/*  A constant array of IOASCs/URCs/Error Messages */
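/*
 * Each entry below pairs an IOASC with a flag selecting whether the raw
 * IOASA should also be logged, plus the log level at which the
 * URC-prefixed error text is printed.
 */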
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

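/*
 * Enclosure entries used to cap the SCSI bus speed.  An 'X' in the
 * second string marks a product ID byte that must match; any other
 * character (such as '*') is a wildcard.  The final field is the bus
 * speed limit in MB/sec for that backplane.
 */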
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

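/**
 * ipr_get_hrrq_index - Get the next HRRQ index to use, round-robin
 * @ioa_cfg:	ioa config struct
 *
 * HRRQ 0 is reserved for initialization and internally generated
 * commands, so when multiple queues exist the remaining queues are
 * selected round-robin.
 *
 * Return value:
 * 	index of the HRRQ to use
 **/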
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	else
		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
1504
1505/**
1506 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1507 * @prefix: string to print at start of printk
1508 * @hostrcb: hostrcb pointer
1509 * @vpd: vendor/product id/sn/wwn struct
1510 *
1511 * Return value:
1512 * none
1513 **/
1514static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1515 struct ipr_ext_vpd *vpd)
1516{
1517 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1518 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1519 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1520}
1521
1522/**
1523 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1524 * @vpd: vendor/product id/sn/wwn struct
1525 *
1526 * Return value:
1527 * none
1528 **/
1529static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1530{
1531 ipr_log_vpd(&vpd->vpd);
1532 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1533 be32_to_cpu(vpd->wwid[1]));
1534}
1535
1536/**
1537 * ipr_log_enhanced_cache_error - Log a cache error.
1538 * @ioa_cfg: ioa config struct
1539 * @hostrcb: hostrcb struct
1540 *
1541 * Return value:
1542 * none
1543 **/
1544static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1545 struct ipr_hostrcb *hostrcb)
1546{
1547 struct ipr_hostrcb_type_12_error *error;
1548
1549 if (ioa_cfg->sis64)
1550 error = &hostrcb->hcam.u.error64.u.type_12_error;
1551 else
1552 error = &hostrcb->hcam.u.error.u.type_12_error;
1553
1554 ipr_err("-----Current Configuration-----\n");
1555 ipr_err("Cache Directory Card Information:\n");
1556 ipr_log_ext_vpd(&error->ioa_vpd);
1557 ipr_err("Adapter Card Information:\n");
1558 ipr_log_ext_vpd(&error->cfc_vpd);
1559
1560 ipr_err("-----Expected Configuration-----\n");
1561 ipr_err("Cache Directory Card Information:\n");
1562 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1563 ipr_err("Adapter Card Information:\n");
1564 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1565
1566 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1567 be32_to_cpu(error->ioa_data[0]),
1568 be32_to_cpu(error->ioa_data[1]),
1569 be32_to_cpu(error->ioa_data[2]));
1570}
1571
1572/**
1573 * ipr_log_cache_error - Log a cache error.
1574 * @ioa_cfg: ioa config struct
1575 * @hostrcb: hostrcb struct
1576 *
1577 * Return value:
1578 * none
1579 **/
1580static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1581 struct ipr_hostrcb *hostrcb)
1582{
1583 struct ipr_hostrcb_type_02_error *error =
1584 &hostrcb->hcam.u.error.u.type_02_error;
1585
1586 ipr_err("-----Current Configuration-----\n");
1587 ipr_err("Cache Directory Card Information:\n");
cfc32139 1588 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1589 ipr_err("Adapter Card Information:\n");
cfc32139 1590 ipr_log_vpd(&error->cfc_vpd);
1591
1592 ipr_err("-----Expected Configuration-----\n");
1593 ipr_err("Cache Directory Card Information:\n");
cfc32139 1594 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1595 ipr_err("Adapter Card Information:\n");
cfc32139 1596 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1597
1598 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1599 be32_to_cpu(error->ioa_data[0]),
1600 be32_to_cpu(error->ioa_data[1]),
1601 be32_to_cpu(error->ioa_data[2]));
1602}
1603
1604/**
1605 * ipr_log_enhanced_config_error - Log a configuration error.
1606 * @ioa_cfg: ioa config struct
1607 * @hostrcb: hostrcb struct
1608 *
1609 * Return value:
1610 * none
1611 **/
1612static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1613 struct ipr_hostrcb *hostrcb)
1614{
1615 int errors_logged, i;
1616 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1617 struct ipr_hostrcb_type_13_error *error;
1618
1619 error = &hostrcb->hcam.u.error.u.type_13_error;
1620 errors_logged = be32_to_cpu(error->errors_logged);
1621
1622 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1623 be32_to_cpu(error->errors_detected), errors_logged);
1624
1625 dev_entry = error->dev;
1626
1627 for (i = 0; i < errors_logged; i++, dev_entry++) {
1628 ipr_err_separator;
1629
1630 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1631 ipr_log_ext_vpd(&dev_entry->vpd);
1632
1633 ipr_err("-----New Device Information-----\n");
1634 ipr_log_ext_vpd(&dev_entry->new_vpd);
1635
1636 ipr_err("Cache Directory Card Information:\n");
1637 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1638
1639 ipr_err("Adapter Card Information:\n");
1640 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1641 }
1642}
1643
1644/**
 1645 * ipr_log_sis64_config_error - Log a configuration error.
1646 * @ioa_cfg: ioa config struct
1647 * @hostrcb: hostrcb struct
1648 *
1649 * Return value:
1650 * none
1651 **/
1652static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1653 struct ipr_hostrcb *hostrcb)
1654{
1655 int errors_logged, i;
1656 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1657 struct ipr_hostrcb_type_23_error *error;
1658 char buffer[IPR_MAX_RES_PATH_LENGTH];
1659
1660 error = &hostrcb->hcam.u.error64.u.type_23_error;
1661 errors_logged = be32_to_cpu(error->errors_logged);
1662
1663 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1664 be32_to_cpu(error->errors_detected), errors_logged);
1665
1666 dev_entry = error->dev;
1667
1668 for (i = 0; i < errors_logged; i++, dev_entry++) {
1669 ipr_err_separator;
1670
1671 ipr_err("Device %d : %s", i + 1,
1672 __ipr_format_res_path(dev_entry->res_path,
1673 buffer, sizeof(buffer)));
1674 ipr_log_ext_vpd(&dev_entry->vpd);
1675
1676 ipr_err("-----New Device Information-----\n");
1677 ipr_log_ext_vpd(&dev_entry->new_vpd);
1678
1679 ipr_err("Cache Directory Card Information:\n");
1680 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1681
1682 ipr_err("Adapter Card Information:\n");
1683 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1684 }
1685}
1686
1687/**
1688 * ipr_log_config_error - Log a configuration error.
1689 * @ioa_cfg: ioa config struct
1690 * @hostrcb: hostrcb struct
1691 *
1692 * Return value:
1693 * none
1694 **/
1695static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1696 struct ipr_hostrcb *hostrcb)
1697{
1698 int errors_logged, i;
1699 struct ipr_hostrcb_device_data_entry *dev_entry;
1700 struct ipr_hostrcb_type_03_error *error;
1701
1702 error = &hostrcb->hcam.u.error.u.type_03_error;
1703 errors_logged = be32_to_cpu(error->errors_logged);
1704
1705 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1706 be32_to_cpu(error->errors_detected), errors_logged);
1707
cfc32139 1708 dev_entry = error->dev;
1709
1710 for (i = 0; i < errors_logged; i++, dev_entry++) {
1711 ipr_err_separator;
1712
fa15b1f6 1713 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1714 ipr_log_vpd(&dev_entry->vpd);
1715
1716 ipr_err("-----New Device Information-----\n");
cfc32139 1717 ipr_log_vpd(&dev_entry->new_vpd);
1718
1719 ipr_err("Cache Directory Card Information:\n");
cfc32139 1720 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1721
1722 ipr_err("Adapter Card Information:\n");
cfc32139 1723 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1724
1725 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1726 be32_to_cpu(dev_entry->ioa_data[0]),
1727 be32_to_cpu(dev_entry->ioa_data[1]),
1728 be32_to_cpu(dev_entry->ioa_data[2]),
1729 be32_to_cpu(dev_entry->ioa_data[3]),
1730 be32_to_cpu(dev_entry->ioa_data[4]));
1731 }
1732}
1733
1734/**
1735 * ipr_log_enhanced_array_error - Log an array configuration error.
1736 * @ioa_cfg: ioa config struct
1737 * @hostrcb: hostrcb struct
1738 *
1739 * Return value:
1740 * none
1741 **/
1742static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1743 struct ipr_hostrcb *hostrcb)
1744{
1745 int i, num_entries;
1746 struct ipr_hostrcb_type_14_error *error;
1747 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1748 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1749
1750 error = &hostrcb->hcam.u.error.u.type_14_error;
1751
1752 ipr_err_separator;
1753
1754 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1755 error->protection_level,
1756 ioa_cfg->host->host_no,
1757 error->last_func_vset_res_addr.bus,
1758 error->last_func_vset_res_addr.target,
1759 error->last_func_vset_res_addr.lun);
1760
1761 ipr_err_separator;
1762
1763 array_entry = error->array_member;
1764 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1765 ARRAY_SIZE(error->array_member));
1766
1767 for (i = 0; i < num_entries; i++, array_entry++) {
1768 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1769 continue;
1770
1771 if (be32_to_cpu(error->exposed_mode_adn) == i)
1772 ipr_err("Exposed Array Member %d:\n", i);
1773 else
1774 ipr_err("Array Member %d:\n", i);
1775
1776 ipr_log_ext_vpd(&array_entry->vpd);
1777 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1778 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1779 "Expected Location");
1780
1781 ipr_err_separator;
1782 }
1783}
1784
1785/**
1786 * ipr_log_array_error - Log an array configuration error.
1787 * @ioa_cfg: ioa config struct
1788 * @hostrcb: hostrcb struct
1789 *
1790 * Return value:
1791 * none
1792 **/
1793static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1794 struct ipr_hostrcb *hostrcb)
1795{
1796 int i;
1797 struct ipr_hostrcb_type_04_error *error;
1798 struct ipr_hostrcb_array_data_entry *array_entry;
1799 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1800
1801 error = &hostrcb->hcam.u.error.u.type_04_error;
1802
1803 ipr_err_separator;
1804
1805 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1806 error->protection_level,
1807 ioa_cfg->host->host_no,
1808 error->last_func_vset_res_addr.bus,
1809 error->last_func_vset_res_addr.target,
1810 error->last_func_vset_res_addr.lun);
1811
1812 ipr_err_separator;
1813
1814 array_entry = error->array_member;
1815
1816 for (i = 0; i < 18; i++) {
cfc32139 1817 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1818 continue;
1819
fa15b1f6 1820 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1821 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1822 else
1da177e4 1823 ipr_err("Array Member %d:\n", i);
1da177e4 1824
cfc32139 1825 ipr_log_vpd(&array_entry->vpd);
1da177e4 1826
1827 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1828 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1829 "Expected Location");
1830
1831 ipr_err_separator;
1832
1833 if (i == 9)
1834 array_entry = error->array_member2;
1835 else
1836 array_entry++;
1837 }
1838}
1839
1840/**
b0df54bb 1841 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1842 * @ioa_cfg: ioa config struct
1843 * @data: IOA error data
1844 * @len: data length
1845 *
1846 * Return value:
1847 * none
1848 **/
ac719aba 1849static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1850{
1851 int i;
1da177e4 1852
b0df54bb 1853 if (len == 0)
1854 return;
1855
1856 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1857 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1858
b0df54bb 1859 for (i = 0; i < len / 4; i += 4) {
1da177e4 1860 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1861 be32_to_cpu(data[i]),
1862 be32_to_cpu(data[i+1]),
1863 be32_to_cpu(data[i+2]),
1864 be32_to_cpu(data[i+3]));
1865 }
1866}
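
The dump prints four big-endian 32-bit words per line: the loop bound is len / 4 words, the index advances four words per line, and the leading offset i * 4 is in bytes. A userspace sketch of the same layout, with be32toh/htobe32 standing in for the kernel byte-order helpers and len assumed to be a multiple of 16 bytes:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

/* Dump big-endian words four per line, as ipr_log_hex_data() does. */
static void hex_dump(const uint32_t *data, int len)
{
	int i;

	for (i = 0; i < len / 4; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", i * 4,
		       be32toh(data[i]), be32toh(data[i + 1]),
		       be32toh(data[i + 2]), be32toh(data[i + 3]));
}

int main(void)
{
	uint32_t buf[8] = { htobe32(0xDEADBEEF), htobe32(1), htobe32(2),
			    htobe32(3), htobe32(4), htobe32(5),
			    htobe32(6), htobe32(7) };

	hex_dump(buf, sizeof(buf));	/* two lines, offsets 0x0 and 0x10 */
	return 0;
}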
1867
1868/**
1869 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1870 * @ioa_cfg: ioa config struct
1871 * @hostrcb: hostrcb struct
1872 *
1873 * Return value:
1874 * none
1875 **/
1876static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1877 struct ipr_hostrcb *hostrcb)
1878{
1879 struct ipr_hostrcb_type_17_error *error;
1880
1881 if (ioa_cfg->sis64)
1882 error = &hostrcb->hcam.u.error64.u.type_17_error;
1883 else
1884 error = &hostrcb->hcam.u.error.u.type_17_error;
1885
ee0f05b8 1886 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1887 strim(error->failure_reason);
ee0f05b8 1888
1889 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1890 be32_to_cpu(hostrcb->hcam.u.error.prc));
1891 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1892 ipr_log_hex_data(ioa_cfg, error->data,
1893 be32_to_cpu(hostrcb->hcam.length) -
1894 (offsetof(struct ipr_hostrcb_error, u) +
1895 offsetof(struct ipr_hostrcb_type_17_error, data)));
1896}
1897
1898/**
1899 * ipr_log_dual_ioa_error - Log a dual adapter error.
1900 * @ioa_cfg: ioa config struct
1901 * @hostrcb: hostrcb struct
1902 *
1903 * Return value:
1904 * none
1905 **/
1906static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1907 struct ipr_hostrcb *hostrcb)
1908{
1909 struct ipr_hostrcb_type_07_error *error;
1910
1911 error = &hostrcb->hcam.u.error.u.type_07_error;
1912 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1913 strim(error->failure_reason);
b0df54bb 1914
1915 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1916 be32_to_cpu(hostrcb->hcam.u.error.prc));
1917 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1918 ipr_log_hex_data(ioa_cfg, error->data,
1919 be32_to_cpu(hostrcb->hcam.length) -
1920 (offsetof(struct ipr_hostrcb_error, u) +
1921 offsetof(struct ipr_hostrcb_type_07_error, data)));
1922}
1923
1924static const struct {
1925 u8 active;
1926 char *desc;
1927} path_active_desc[] = {
1928 { IPR_PATH_NO_INFO, "Path" },
1929 { IPR_PATH_ACTIVE, "Active path" },
1930 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1931};
1932
1933static const struct {
1934 u8 state;
1935 char *desc;
1936} path_state_desc[] = {
1937 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1938 { IPR_PATH_HEALTHY, "is healthy" },
1939 { IPR_PATH_DEGRADED, "is degraded" },
1940 { IPR_PATH_FAILED, "is failed" }
1941};
1942
1943/**
1944 * ipr_log_fabric_path - Log a fabric path error
1945 * @hostrcb: hostrcb struct
1946 * @fabric: fabric descriptor
1947 *
1948 * Return value:
1949 * none
1950 **/
1951static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1952 struct ipr_hostrcb_fabric_desc *fabric)
1953{
1954 int i, j;
1955 u8 path_state = fabric->path_state;
1956 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1957 u8 state = path_state & IPR_PATH_STATE_MASK;
1958
1959 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1960 if (path_active_desc[i].active != active)
1961 continue;
1962
1963 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1964 if (path_state_desc[j].state != state)
1965 continue;
1966
1967 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1968 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1969 path_active_desc[i].desc, path_state_desc[j].desc,
1970 fabric->ioa_port);
1971 } else if (fabric->cascaded_expander == 0xff) {
1972 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1973 path_active_desc[i].desc, path_state_desc[j].desc,
1974 fabric->ioa_port, fabric->phy);
1975 } else if (fabric->phy == 0xff) {
1976 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1977 path_active_desc[i].desc, path_state_desc[j].desc,
1978 fabric->ioa_port, fabric->cascaded_expander);
1979 } else {
1980 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1981 path_active_desc[i].desc, path_state_desc[j].desc,
1982 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1983 }
1984 return;
1985 }
1986 }
1987
1988 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1989 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1990}
1991
1992/**
1993 * ipr_log64_fabric_path - Log a fabric path error
1994 * @hostrcb: hostrcb struct
1995 * @fabric: fabric descriptor
1996 *
1997 * Return value:
1998 * none
1999 **/
2000static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2001 struct ipr_hostrcb64_fabric_desc *fabric)
2002{
2003 int i, j;
2004 u8 path_state = fabric->path_state;
2005 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2006 u8 state = path_state & IPR_PATH_STATE_MASK;
2007 char buffer[IPR_MAX_RES_PATH_LENGTH];
2008
2009 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2010 if (path_active_desc[i].active != active)
2011 continue;
2012
2013 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2014 if (path_state_desc[j].state != state)
2015 continue;
2016
2017 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2018 path_active_desc[i].desc, path_state_desc[j].desc,
2019 ipr_format_res_path(hostrcb->ioa_cfg,
2020 fabric->res_path,
2021 buffer, sizeof(buffer)));
2022 return;
2023 }
2024 }
2025
2026 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2027 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2028 buffer, sizeof(buffer)));
2029}
2030
2031static const struct {
2032 u8 type;
2033 char *desc;
2034} path_type_desc[] = {
2035 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2036 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2037 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2038 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2039};
2040
2041static const struct {
2042 u8 status;
2043 char *desc;
2044} path_status_desc[] = {
2045 { IPR_PATH_CFG_NO_PROB, "Functional" },
2046 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2047 { IPR_PATH_CFG_FAILED, "Failed" },
2048 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2049 { IPR_PATH_NOT_DETECTED, "Missing" },
2050 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2051};
2052
2053static const char *link_rate[] = {
2054 "unknown",
2055 "disabled",
2056 "phy reset problem",
2057 "spinup hold",
2058 "port selector",
2059 "unknown",
2060 "unknown",
2061 "unknown",
2062 "1.5Gbps",
2063 "3.0Gbps",
2064 "unknown",
2065 "unknown",
2066 "unknown",
2067 "unknown",
2068 "unknown",
2069 "unknown"
2070};
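
Because this table has exactly 16 entries, masking the raw link rate down to its low nibble can never index past the end. A minimal decode sketch; the 0x0F mask value here is an assumption standing in for IPR_PHY_LINK_RATE_MASK:

#include <stdio.h>
#include <stdint.h>

#define PHY_LINK_RATE_MASK 0x0F		/* assumed: low nibble selects the rate */

static const char *rate_name[16] = {
	"unknown", "disabled", "phy reset problem", "spinup hold",
	"port selector", "unknown", "unknown", "unknown",
	"1.5Gbps", "3.0Gbps", "unknown", "unknown",
	"unknown", "unknown", "unknown", "unknown"
};

int main(void)
{
	uint8_t raw = 0x89;	/* hypothetical register value */

	printf("link rate: %s\n", rate_name[raw & PHY_LINK_RATE_MASK]);	/* 3.0Gbps */
	return 0;
}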
2071
2072/**
2073 * ipr_log_path_elem - Log a fabric path element.
2074 * @hostrcb: hostrcb struct
2075 * @cfg: fabric path element struct
2076 *
2077 * Return value:
2078 * none
2079 **/
2080static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2081 struct ipr_hostrcb_config_element *cfg)
2082{
2083 int i, j;
2084 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2085 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2086
2087 if (type == IPR_PATH_CFG_NOT_EXIST)
2088 return;
2089
2090 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2091 if (path_type_desc[i].type != type)
2092 continue;
2093
2094 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2095 if (path_status_desc[j].status != status)
2096 continue;
2097
2098 if (type == IPR_PATH_CFG_IOA_PORT) {
2099 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2100 path_status_desc[j].desc, path_type_desc[i].desc,
2101 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2102 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2103 } else {
2104 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2105 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2106 path_status_desc[j].desc, path_type_desc[i].desc,
2107 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2108 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2109 } else if (cfg->cascaded_expander == 0xff) {
2110 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2111 "WWN=%08X%08X\n", path_status_desc[j].desc,
2112 path_type_desc[i].desc, cfg->phy,
2113 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2114 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2115 } else if (cfg->phy == 0xff) {
2116 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2117 "WWN=%08X%08X\n", path_status_desc[j].desc,
2118 path_type_desc[i].desc, cfg->cascaded_expander,
2119 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2120 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2121 } else {
2122 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2123 "WWN=%08X%08X\n", path_status_desc[j].desc,
2124 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2125 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2126 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2127 }
2128 }
2129 return;
2130 }
2131 }
2132
2133 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2134 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2135 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2136 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2137}
2138
2139/**
2140 * ipr_log64_path_elem - Log a fabric path element.
2141 * @hostrcb: hostrcb struct
2142 * @cfg: fabric path element struct
2143 *
2144 * Return value:
2145 * none
2146 **/
2147static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2148 struct ipr_hostrcb64_config_element *cfg)
2149{
2150 int i, j;
2151 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2152 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2153 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2154 char buffer[IPR_MAX_RES_PATH_LENGTH];
2155
2156 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2157 return;
2158
2159 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2160 if (path_type_desc[i].type != type)
2161 continue;
2162
2163 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2164 if (path_status_desc[j].status != status)
2165 continue;
2166
2167 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2168 path_status_desc[j].desc, path_type_desc[i].desc,
2169 ipr_format_res_path(hostrcb->ioa_cfg,
2170 cfg->res_path, buffer, sizeof(buffer)),
2171 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172 be32_to_cpu(cfg->wwid[0]),
2173 be32_to_cpu(cfg->wwid[1]));
2174 return;
2175 }
2176 }
2177 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2178 "WWN=%08X%08X\n", cfg->type_status,
2179 ipr_format_res_path(hostrcb->ioa_cfg,
2180 cfg->res_path, buffer, sizeof(buffer)),
2181 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2182 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2183}
2184
2185/**
2186 * ipr_log_fabric_error - Log a fabric error.
2187 * @ioa_cfg: ioa config struct
2188 * @hostrcb: hostrcb struct
2189 *
2190 * Return value:
2191 * none
2192 **/
2193static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2194 struct ipr_hostrcb *hostrcb)
2195{
2196 struct ipr_hostrcb_type_20_error *error;
2197 struct ipr_hostrcb_fabric_desc *fabric;
2198 struct ipr_hostrcb_config_element *cfg;
2199 int i, add_len;
2200
2201 error = &hostrcb->hcam.u.error.u.type_20_error;
2202 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2203 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2204
2205 add_len = be32_to_cpu(hostrcb->hcam.length) -
2206 (offsetof(struct ipr_hostrcb_error, u) +
2207 offsetof(struct ipr_hostrcb_type_20_error, desc));
2208
2209 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2210 ipr_log_fabric_path(hostrcb, fabric);
2211 for_each_fabric_cfg(fabric, cfg)
2212 ipr_log_path_elem(hostrcb, cfg);
2213
2214 add_len -= be16_to_cpu(fabric->length);
2215 fabric = (struct ipr_hostrcb_fabric_desc *)
2216 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2217 }
2218
ac719aba 2219 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2220}
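
Each fabric descriptor carries its own big-endian length field, so the walk advances by that many bytes rather than by sizeof(*fabric), and whatever trails the final descriptor is dumped as raw hex. A sketch of the same variable-length walk over a byte buffer (the two-field descriptor shape is assumed for illustration; real fabric descriptors carry more header fields):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

struct desc {
	uint16_t length;	/* big-endian, counts the whole descriptor */
	uint8_t payload[];
};

static void walk(const uint8_t *buf, int num_entries)
{
	const struct desc *d = (const struct desc *)buf;
	int i;

	for (i = 0; i < num_entries; i++) {
		int len = be16toh(d->length);

		printf("descriptor %d: %d bytes\n", i, len);
		/* advance by the descriptor's own length, not sizeof(*d) */
		d = (const struct desc *)((const uint8_t *)d + len);
	}
}

int main(void)
{
	uint8_t buf[] = { 0x00, 0x04, 0xAA, 0xBB,	/* 4-byte entry */
			  0x00, 0x06, 1, 2, 3, 4 };	/* 6-byte entry */

	walk(buf, 2);
	return 0;
}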
2221
2222/**
2223 * ipr_log_sis64_array_error - Log a sis64 array error.
2224 * @ioa_cfg: ioa config struct
2225 * @hostrcb: hostrcb struct
2226 *
2227 * Return value:
2228 * none
2229 **/
2230static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2231 struct ipr_hostrcb *hostrcb)
2232{
2233 int i, num_entries;
2234 struct ipr_hostrcb_type_24_error *error;
2235 struct ipr_hostrcb64_array_data_entry *array_entry;
2236 char buffer[IPR_MAX_RES_PATH_LENGTH];
2237 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2238
2239 error = &hostrcb->hcam.u.error64.u.type_24_error;
2240
2241 ipr_err_separator;
2242
2243 ipr_err("RAID %s Array Configuration: %s\n",
2244 error->protection_level,
2245 ipr_format_res_path(ioa_cfg, error->last_res_path,
2246 buffer, sizeof(buffer)));
2247
2248 ipr_err_separator;
2249
2250 array_entry = error->array_member;
2251 num_entries = min_t(u32, error->num_entries,
2252 ARRAY_SIZE(error->array_member));
2253
2254 for (i = 0; i < num_entries; i++, array_entry++) {
2255
2256 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2257 continue;
2258
2259 if (error->exposed_mode_adn == i)
2260 ipr_err("Exposed Array Member %d:\n", i);
2261 else
2262 ipr_err("Array Member %d:\n", i);
2263
2265 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2266 ipr_err("Current Location: %s\n",
2267 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2268 buffer, sizeof(buffer)));
7262026f 2269 ipr_err("Expected Location: %s\n",
2270 ipr_format_res_path(ioa_cfg,
2271 array_entry->expected_res_path,
2272 buffer, sizeof(buffer)));
2273
2274 ipr_err_separator;
2275 }
2276}
2277
2278/**
2279 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2280 * @ioa_cfg: ioa config struct
2281 * @hostrcb: hostrcb struct
2282 *
2283 * Return value:
2284 * none
2285 **/
2286static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2287 struct ipr_hostrcb *hostrcb)
2288{
2289 struct ipr_hostrcb_type_30_error *error;
2290 struct ipr_hostrcb64_fabric_desc *fabric;
2291 struct ipr_hostrcb64_config_element *cfg;
2292 int i, add_len;
2293
2294 error = &hostrcb->hcam.u.error64.u.type_30_error;
2295
2296 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2297 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2298
2299 add_len = be32_to_cpu(hostrcb->hcam.length) -
2300 (offsetof(struct ipr_hostrcb64_error, u) +
2301 offsetof(struct ipr_hostrcb_type_30_error, desc));
2302
2303 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2304 ipr_log64_fabric_path(hostrcb, fabric);
2305 for_each_fabric_cfg(fabric, cfg)
2306 ipr_log64_path_elem(hostrcb, cfg);
2307
2308 add_len -= be16_to_cpu(fabric->length);
2309 fabric = (struct ipr_hostrcb64_fabric_desc *)
2310 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2311 }
2312
2313 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2314}
2315
2316/**
2317 * ipr_log_generic_error - Log an adapter error.
2318 * @ioa_cfg: ioa config struct
2319 * @hostrcb: hostrcb struct
2320 *
2321 * Return value:
2322 * none
2323 **/
2324static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2325 struct ipr_hostrcb *hostrcb)
2326{
ac719aba 2327 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2328 be32_to_cpu(hostrcb->hcam.length));
2329}
2330
2331/**
 2332 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 2333 * @ioasc: IOASC
 2334 *
 2335 * This function will return the index into the ipr_error_table
2336 * for the specified IOASC. If the IOASC is not in the table,
2337 * 0 will be returned, which points to the entry used for unknown errors.
2338 *
2339 * Return value:
2340 * index into the ipr_error_table
2341 **/
2342static u32 ipr_get_error(u32 ioasc)
2343{
2344 int i;
2345
2346 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2347 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2348 return i;
2349
2350 return 0;
2351}
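
The lookup masks off the low status byte before comparing, and a miss falls through to index 0, the unknown-error slot, so callers always get a valid table index. A compact sketch of the scheme; the table contents and the 0xFFFFFF00 mask are assumptions standing in for ipr_error_table and IPR_IOASC_IOASC_MASK:

#include <stdio.h>
#include <stdint.h>

#define IOASC_MASK 0xFFFFFF00u	/* assumed: low byte is status, not identity */

static const struct { uint32_t ioasc; const char *desc; } err_table[] = {
	{ 0x00000000, "unknown error" },	/* entry 0: the fallback */
	{ 0x01080000, "recovered error" },
	{ 0x02040400, "bus was reset" },
};

static unsigned int get_error(uint32_t ioasc)
{
	unsigned int i;

	for (i = 0; i < sizeof(err_table) / sizeof(err_table[0]); i++)
		if (err_table[i].ioasc == (ioasc & IOASC_MASK))
			return i;
	return 0;	/* not found: point at the unknown-error entry */
}

int main(void)
{
	printf("%s\n", err_table[get_error(0x02040433)].desc);	/* bus was reset */
	return 0;
}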
2352
2353/**
2354 * ipr_handle_log_data - Log an adapter error.
2355 * @ioa_cfg: ioa config struct
2356 * @hostrcb: hostrcb struct
2357 *
2358 * This function logs an adapter error to the system.
2359 *
2360 * Return value:
2361 * none
2362 **/
2363static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2364 struct ipr_hostrcb *hostrcb)
2365{
2366 u32 ioasc;
2367 int error_index;
2368
2369 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2370 return;
2371
2372 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2373 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2374
2375 if (ioa_cfg->sis64)
2376 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2377 else
2378 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2379
2380 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2381 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2382 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2383 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2384 hostrcb->hcam.u.error.fd_res_addr.bus);
2385 }
2386
2387 error_index = ipr_get_error(ioasc);
2388
2389 if (!ipr_error_table[error_index].log_hcam)
2390 return;
2391
49dc6a18 2392 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2393
2394 /* Set indication we have logged an error */
2395 ioa_cfg->errors_logged++;
2396
933916f3 2397 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2398 return;
2399 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2400 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2401
2402 switch (hostrcb->hcam.overlay_id) {
2403 case IPR_HOST_RCB_OVERLAY_ID_2:
2404 ipr_log_cache_error(ioa_cfg, hostrcb);
2405 break;
2406 case IPR_HOST_RCB_OVERLAY_ID_3:
2407 ipr_log_config_error(ioa_cfg, hostrcb);
2408 break;
2409 case IPR_HOST_RCB_OVERLAY_ID_4:
2410 case IPR_HOST_RCB_OVERLAY_ID_6:
2411 ipr_log_array_error(ioa_cfg, hostrcb);
2412 break;
2413 case IPR_HOST_RCB_OVERLAY_ID_7:
2414 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2415 break;
2416 case IPR_HOST_RCB_OVERLAY_ID_12:
2417 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2418 break;
2419 case IPR_HOST_RCB_OVERLAY_ID_13:
2420 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2421 break;
2422 case IPR_HOST_RCB_OVERLAY_ID_14:
2423 case IPR_HOST_RCB_OVERLAY_ID_16:
2424 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2425 break;
2426 case IPR_HOST_RCB_OVERLAY_ID_17:
2427 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2428 break;
2429 case IPR_HOST_RCB_OVERLAY_ID_20:
2430 ipr_log_fabric_error(ioa_cfg, hostrcb);
2431 break;
2432 case IPR_HOST_RCB_OVERLAY_ID_23:
2433 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2434 break;
2435 case IPR_HOST_RCB_OVERLAY_ID_24:
2436 case IPR_HOST_RCB_OVERLAY_ID_26:
2437 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2438 break;
2439 case IPR_HOST_RCB_OVERLAY_ID_30:
2440 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2441 break;
cf852037 2442 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2443 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2444 default:
a9cfca96 2445 ipr_log_generic_error(ioa_cfg, hostrcb);
2446 break;
2447 }
2448}
2449
2450/**
2451 * ipr_process_error - Op done function for an adapter error log.
2452 * @ipr_cmd: ipr command struct
2453 *
 2454 * This function is the op done function for an error log host
 2455 * controlled async message (HCAM) from the adapter. It will log the error and
2456 * send the HCAM back to the adapter.
2457 *
2458 * Return value:
2459 * none
2460 **/
2461static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2462{
2463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2464 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2465 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2466 u32 fd_ioasc;
2467
2468 if (ioa_cfg->sis64)
2469 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2470 else
2471 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2472
2473 list_del(&hostrcb->queue);
05a6538a 2474 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2475
2476 if (!ioasc) {
2477 ipr_handle_log_data(ioa_cfg, hostrcb);
2478 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2479 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2480 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2481 dev_err(&ioa_cfg->pdev->dev,
2482 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2483 }
2484
2485 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2486}
2487
2488/**
2489 * ipr_timeout - An internally generated op has timed out.
2490 * @ipr_cmd: ipr command struct
2491 *
2492 * This function blocks host requests and initiates an
2493 * adapter reset.
2494 *
2495 * Return value:
2496 * none
2497 **/
2498static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2499{
2500 unsigned long lock_flags = 0;
2501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2502
2503 ENTER;
2504 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2505
2506 ioa_cfg->errors_logged++;
2507 dev_err(&ioa_cfg->pdev->dev,
2508 "Adapter being reset due to command timeout.\n");
2509
2510 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2511 ioa_cfg->sdt_state = GET_DUMP;
2512
2513 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2514 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2515
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2517 LEAVE;
2518}
2519
2520/**
2521 * ipr_oper_timeout - Adapter timed out transitioning to operational
2522 * @ipr_cmd: ipr command struct
2523 *
2524 * This function blocks host requests and initiates an
2525 * adapter reset.
2526 *
2527 * Return value:
2528 * none
2529 **/
2530static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2531{
2532 unsigned long lock_flags = 0;
2533 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2534
2535 ENTER;
2536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2537
2538 ioa_cfg->errors_logged++;
2539 dev_err(&ioa_cfg->pdev->dev,
2540 "Adapter timed out transitioning to operational.\n");
2541
2542 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2543 ioa_cfg->sdt_state = GET_DUMP;
2544
2545 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2546 if (ipr_fastfail)
2547 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2548 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2549 }
2550
2551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2552 LEAVE;
2553}
2554
2555/**
2556 * ipr_reset_reload - Reset/Reload the IOA
2557 * @ioa_cfg: ioa config struct
2558 * @shutdown_type: shutdown type
2559 *
2560 * This function resets the adapter and re-initializes it.
2561 * This function assumes that all new host commands have been stopped.
2562 * Return value:
2563 * SUCCESS / FAILED
2564 **/
2565static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2566 enum ipr_shutdown_type shutdown_type)
2567{
2568 if (!ioa_cfg->in_reset_reload)
2569 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2570
2571 spin_unlock_irq(ioa_cfg->host->host_lock);
2572 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2573 spin_lock_irq(ioa_cfg->host->host_lock);
2574
 2575 /* If we got hit with a host reset while we were already resetting
 2576 the adapter for some reason, and the reset failed, report failure. */
56d6aa33 2577 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
2578 ipr_trace;
2579 return FAILED;
2580 }
2581
2582 return SUCCESS;
2583}
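
The function drops the host lock, sleeps on reset_wait_q until in_reset_reload clears, then retakes the lock, which is why it may only be called from task context with the lock held. A userspace sketch of the same drop-wait-retake idiom, with a pthread mutex and condition variable standing in for the host lock and wait queue:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_wait_q = PTHREAD_COND_INITIALIZER;
static bool in_reset_reload;

/* Called with 'lock' held: sleep until the reset finishes.
 * pthread_cond_wait() drops the mutex while asleep and retakes it
 * before returning, mirroring the unlock/wait_event/lock sequence. */
void wait_for_reset(void)
{
	while (in_reset_reload)
		pthread_cond_wait(&reset_wait_q, &lock);
}

/* Reset completion path: clear the flag and wake every waiter. */
void reset_finished(void)
{
	pthread_mutex_lock(&lock);
	in_reset_reload = false;
	pthread_cond_broadcast(&reset_wait_q);
	pthread_mutex_unlock(&lock);
}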
2584
2585/**
2586 * ipr_find_ses_entry - Find matching SES in SES table
2587 * @res: resource entry struct of SES
2588 *
2589 * Return value:
2590 * pointer to SES table entry / NULL on failure
2591 **/
2592static const struct ipr_ses_table_entry *
2593ipr_find_ses_entry(struct ipr_resource_entry *res)
2594{
2595 int i, j, matches;
3e7ebdfa 2596 struct ipr_std_inq_vpids *vpids;
2597 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2598
2599 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2600 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2601 if (ste->compare_product_id_byte[j] == 'X') {
2602 vpids = &res->std_inq_data.vpids;
2603 if (vpids->product_id[j] == ste->product_id[j])
2604 matches++;
2605 else
2606 break;
2607 } else
2608 matches++;
2609 }
2610
2611 if (matches == IPR_PROD_ID_LEN)
2612 return ste;
2613 }
2614
2615 return NULL;
2616}
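
Note the comparison convention: an 'X' in compare_product_id_byte marks a byte that must match the inquiry data, while any other value makes that position a don't-care. A sketch of the same masked comparison (the product ids shown are made up):

#include <stdio.h>

#define PROD_ID_LEN 16

/* 'X' in the mask marks a byte that must match; anything else is a
 * don't-care -- the same convention as ipr_find_ses_entry(). */
static int ses_matches(const char *mask, const char *want, const char *got)
{
	int j;

	for (j = 0; j < PROD_ID_LEN; j++)
		if (mask[j] == 'X' && want[j] != got[j])
			return 0;
	return 1;
}

int main(void)
{
	/* only the first five bytes are significant here */
	printf("%d\n", ses_matches("XXXXX           ",
				   "HSBP1           ",
				   "HSBP1 rev 2     "));	/* prints 1 */
	return 0;
}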
2617
2618/**
2619 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2620 * @ioa_cfg: ioa config struct
2621 * @bus: SCSI bus
2622 * @bus_width: bus width
2623 *
2624 * Return value:
2625 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2626 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2627 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2628 * max 160MHz = max 320MB/sec).
2629 **/
2630static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2631{
2632 struct ipr_resource_entry *res;
2633 const struct ipr_ses_table_entry *ste;
2634 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2635
2636 /* Loop through each config table entry in the config table buffer */
2637 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2638 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2639 continue;
2640
3e7ebdfa 2641 if (bus != res->bus)
2642 continue;
2643
2644 if (!(ste = ipr_find_ses_entry(res)))
2645 continue;
2646
2647 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2648 }
2649
2650 return max_xfer_rate;
2651}
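
Worked numbers for the units: with the SES table limit in MB/sec and the result in 100 kHz steps, a 160 MB/sec limit on a 16-bit (2-byte) bus gives (160 * 10) / (16 / 8) = 800, i.e. an 80 MHz bus clock. As a check (the limit value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int limit_mb_s = 160;	/* hypothetical SES table limit, MB/sec */
	unsigned int bus_width = 16;	/* wide SCSI: two bytes per transfer */
	unsigned int rate = (limit_mb_s * 10) / (bus_width / 8);

	printf("%u x 100kHz = %u MHz\n", rate, rate / 10);	/* 800 -> 80 MHz */
	return 0;
}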
2652
2653/**
2654 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2655 * @ioa_cfg: ioa config struct
2656 * @max_delay: max delay in micro-seconds to wait
2657 *
2658 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2659 *
2660 * Return value:
2661 * 0 on success / other on failure
2662 **/
2663static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2664{
2665 volatile u32 pcii_reg;
2666 int delay = 1;
2667
2668 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2669 while (delay < max_delay) {
2670 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2671
2672 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2673 return 0;
2674
2675 /* udelay cannot be used if delay is more than a few milliseconds */
2676 if ((delay / 1000) > MAX_UDELAY_MS)
2677 mdelay(delay / 1000);
2678 else
2679 udelay(delay);
2680
2681 delay += delay;
2682 }
2683 return -EIO;
2684}
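
The delay += delay step doubles the poll interval each pass, so the total time spent waiting stays within roughly twice max_delay, and the longer waits switch from udelay to mdelay. A userspace sketch of the same doubling poll, with usleep standing in for udelay/mdelay and the readiness hook assumed:

#include <unistd.h>

/* Poll with a doubling backoff, as ipr_wait_iodbg_ack() does; 'ready'
 * stands in for the interrupt-register read. Returns 0 once ready,
 * -1 after roughly max_delay_us of accumulated waiting. */
int poll_backoff(int (*ready)(void), int max_delay_us)
{
	int delay = 1;

	while (delay < max_delay_us) {
		if (ready())
			return 0;
		usleep(delay);
		delay += delay;		/* 1, 2, 4, 8, ... microseconds */
	}
	return -1;
}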
2685
2686/**
2687 * ipr_get_sis64_dump_data_section - Dump IOA memory
2688 * @ioa_cfg: ioa config struct
2689 * @start_addr: adapter address to dump
2690 * @dest: destination kernel buffer
2691 * @length_in_words: length to dump in 4 byte words
2692 *
2693 * Return value:
2694 * 0 on success
2695 **/
2696static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2697 u32 start_addr,
2698 __be32 *dest, u32 length_in_words)
2699{
2700 int i;
2701
2702 for (i = 0; i < length_in_words; i++) {
2703 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2704 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2705 dest++;
2706 }
2707
2708 return 0;
2709}
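
sis64 adapters expose IOA memory through an address/data register pair: write a word's address to the dump address register, read the word back from the dump data register, and step by four bytes. A sketch of that indirect-window idiom with the register accessors abstracted (stand-ins for writel()/readl()):

#include <stdint.h>

/* Indirect register window, as in ipr_get_sis64_dump_data_section():
 * select each word by writing its address, then latch it from the
 * data register. */
void dump_window(void (*write_addr_reg)(uint32_t),
		 uint32_t (*read_data_reg)(void),
		 uint32_t start_addr, uint32_t *dest, uint32_t nwords)
{
	uint32_t i;

	for (i = 0; i < nwords; i++) {
		write_addr_reg(start_addr + i * 4);	/* pick the word */
		dest[i] = read_data_reg();		/* read it out */
	}
}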
2710
2711/**
2712 * ipr_get_ldump_data_section - Dump IOA memory
2713 * @ioa_cfg: ioa config struct
2714 * @start_addr: adapter address to dump
2715 * @dest: destination kernel buffer
2716 * @length_in_words: length to dump in 4 byte words
2717 *
2718 * Return value:
2719 * 0 on success / -EIO on failure
2720 **/
2721static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2722 u32 start_addr,
2723 __be32 *dest, u32 length_in_words)
2724{
2725 volatile u32 temp_pcii_reg;
2726 int i, delay = 0;
2727
2728 if (ioa_cfg->sis64)
2729 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2730 dest, length_in_words);
2731
2732 /* Write IOA interrupt reg starting LDUMP state */
2733 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2734 ioa_cfg->regs.set_uproc_interrupt_reg32);
2735
2736 /* Wait for IO debug acknowledge */
2737 if (ipr_wait_iodbg_ack(ioa_cfg,
2738 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2739 dev_err(&ioa_cfg->pdev->dev,
2740 "IOA dump long data transfer timeout\n");
2741 return -EIO;
2742 }
2743
2744 /* Signal LDUMP interlocked - clear IO debug ack */
2745 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2746 ioa_cfg->regs.clr_interrupt_reg);
2747
2748 /* Write Mailbox with starting address */
2749 writel(start_addr, ioa_cfg->ioa_mailbox);
2750
2751 /* Signal address valid - clear IOA Reset alert */
2752 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2753 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2754
2755 for (i = 0; i < length_in_words; i++) {
2756 /* Wait for IO debug acknowledge */
2757 if (ipr_wait_iodbg_ack(ioa_cfg,
2758 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2759 dev_err(&ioa_cfg->pdev->dev,
2760 "IOA dump short data transfer timeout\n");
2761 return -EIO;
2762 }
2763
2764 /* Read data from mailbox and increment destination pointer */
2765 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2766 dest++;
2767
2768 /* For all but the last word of data, signal data received */
2769 if (i < (length_in_words - 1)) {
2770 /* Signal dump data received - Clear IO debug Ack */
2771 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2772 ioa_cfg->regs.clr_interrupt_reg);
2773 }
2774 }
2775
2776 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2777 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2778 ioa_cfg->regs.set_uproc_interrupt_reg32);
2779
2780 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2781 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2782
2783 /* Signal dump data received - Clear IO debug Ack */
2784 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2785 ioa_cfg->regs.clr_interrupt_reg);
2786
2787 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2788 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2789 temp_pcii_reg =
214777ba 2790 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2791
2792 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2793 return 0;
2794
2795 udelay(10);
2796 delay += 10;
2797 }
2798
2799 return 0;
2800}
2801
2802#ifdef CONFIG_SCSI_IPR_DUMP
2803/**
2804 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2805 * @ioa_cfg: ioa config struct
2806 * @pci_address: adapter address
2807 * @length: length of data to copy
2808 *
2809 * Copy data from PCI adapter to kernel buffer.
2810 * Note: length MUST be a 4 byte multiple
2811 * Return value:
2812 * 0 on success / other on failure
2813 **/
2814static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2815 unsigned long pci_address, u32 length)
2816{
2817 int bytes_copied = 0;
4d4dd706 2818 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2819 __be32 *page;
2820 unsigned long lock_flags = 0;
2821 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2822
2823 if (ioa_cfg->sis64)
2824 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2825 else
2826 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2827
1da177e4 2828 while (bytes_copied < length &&
4d4dd706 2829 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2830 if (ioa_dump->page_offset >= PAGE_SIZE ||
2831 ioa_dump->page_offset == 0) {
2832 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2833
2834 if (!page) {
2835 ipr_trace;
2836 return bytes_copied;
2837 }
2838
2839 ioa_dump->page_offset = 0;
2840 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2841 ioa_dump->next_page_index++;
2842 } else
2843 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2844
2845 rem_len = length - bytes_copied;
2846 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2847 cur_len = min(rem_len, rem_page_len);
2848
2849 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2850 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2851 rc = -EIO;
2852 } else {
2853 rc = ipr_get_ldump_data_section(ioa_cfg,
2854 pci_address + bytes_copied,
2855 &page[ioa_dump->page_offset / 4],
2856 (cur_len / sizeof(u32)));
2857 }
2858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2859
2860 if (!rc) {
2861 ioa_dump->page_offset += cur_len;
2862 bytes_copied += cur_len;
2863 } else {
2864 ipr_trace;
2865 break;
2866 }
2867 schedule();
2868 }
2869
2870 return bytes_copied;
2871}
2872
2873/**
2874 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2875 * @hdr: dump entry header struct
2876 *
2877 * Return value:
2878 * nothing
2879 **/
2880static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2881{
2882 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2883 hdr->num_elems = 1;
2884 hdr->offset = sizeof(*hdr);
2885 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2886}
2887
2888/**
2889 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2890 * @ioa_cfg: ioa config struct
2891 * @driver_dump: driver dump struct
2892 *
2893 * Return value:
2894 * nothing
2895 **/
2896static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2897 struct ipr_driver_dump *driver_dump)
2898{
2899 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2900
2901 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2902 driver_dump->ioa_type_entry.hdr.len =
2903 sizeof(struct ipr_dump_ioa_type_entry) -
2904 sizeof(struct ipr_dump_entry_header);
2905 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2906 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2907 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2908 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2909 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2910 ucode_vpd->minor_release[1];
2911 driver_dump->hdr.num_entries++;
2912}
2913
2914/**
2915 * ipr_dump_version_data - Fill in the driver version in the dump.
2916 * @ioa_cfg: ioa config struct
2917 * @driver_dump: driver dump struct
2918 *
2919 * Return value:
2920 * nothing
2921 **/
2922static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2923 struct ipr_driver_dump *driver_dump)
2924{
2925 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2926 driver_dump->version_entry.hdr.len =
2927 sizeof(struct ipr_dump_version_entry) -
2928 sizeof(struct ipr_dump_entry_header);
2929 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2930 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2931 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2932 driver_dump->hdr.num_entries++;
2933}
2934
2935/**
2936 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2937 * @ioa_cfg: ioa config struct
2938 * @driver_dump: driver dump struct
2939 *
2940 * Return value:
2941 * nothing
2942 **/
2943static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2944 struct ipr_driver_dump *driver_dump)
2945{
2946 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2947 driver_dump->trace_entry.hdr.len =
2948 sizeof(struct ipr_dump_trace_entry) -
2949 sizeof(struct ipr_dump_entry_header);
2950 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2951 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2952 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2953 driver_dump->hdr.num_entries++;
2954}
2955
2956/**
2957 * ipr_dump_location_data - Fill in the IOA location in the dump.
2958 * @ioa_cfg: ioa config struct
2959 * @driver_dump: driver dump struct
2960 *
2961 * Return value:
2962 * nothing
2963 **/
2964static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2965 struct ipr_driver_dump *driver_dump)
2966{
2967 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2968 driver_dump->location_entry.hdr.len =
2969 sizeof(struct ipr_dump_location_entry) -
2970 sizeof(struct ipr_dump_entry_header);
2971 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2972 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 2973 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2974 driver_dump->hdr.num_entries++;
2975}
2976
2977/**
2978 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2979 * @ioa_cfg: ioa config struct
2980 * @dump: dump struct
2981 *
2982 * Return value:
2983 * nothing
2984 **/
2985static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2986{
2987 unsigned long start_addr, sdt_word;
2988 unsigned long lock_flags = 0;
2989 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2990 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2991 u32 num_entries, max_num_entries, start_off, end_off;
2992 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 2993 struct ipr_sdt *sdt;
dcbad00e 2994 int valid = 1;
2995 int i;
2996
2997 ENTER;
2998
2999 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3000
41e9a696 3001 if (ioa_cfg->sdt_state != READ_DUMP) {
3002 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3003 return;
3004 }
3005
3006 if (ioa_cfg->sis64) {
3007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3008 ssleep(IPR_DUMP_DELAY_SECONDS);
3009 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3010 }
3011
3012 start_addr = readl(ioa_cfg->ioa_mailbox);
3013
dcbad00e 3014 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3015 dev_err(&ioa_cfg->pdev->dev,
3016 "Invalid dump table format: %lx\n", start_addr);
3017 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3018 return;
3019 }
3020
3021 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3022
3023 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3024
3025 /* Initialize the overall dump header */
3026 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3027 driver_dump->hdr.num_entries = 1;
3028 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3029 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3030 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3031 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3032
3033 ipr_dump_version_data(ioa_cfg, driver_dump);
3034 ipr_dump_location_data(ioa_cfg, driver_dump);
3035 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3036 ipr_dump_trace_data(ioa_cfg, driver_dump);
3037
3038 /* Update dump_header */
3039 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3040
3041 /* IOA Dump entry */
3042 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3043 ioa_dump->hdr.len = 0;
3044 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3045 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3046
3047 /* First entries in sdt are actually a list of dump addresses and
3048 lengths to gather the real dump data. sdt represents the pointer
3049 to the ioa generated dump table. Dump data will be extracted based
3050 on entries in this table */
3051 sdt = &ioa_dump->sdt;
3052
3053 if (ioa_cfg->sis64) {
3054 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3055 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3056 } else {
3057 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3058 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3059 }
3060
3061 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3062 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 3063 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 3064 bytes_to_copy / sizeof(__be32));
3065
3066 /* Smart Dump table is ready to use and the first entry is valid */
3067 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3068 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3069 dev_err(&ioa_cfg->pdev->dev,
3070 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3071 rc, be32_to_cpu(sdt->hdr.state));
3072 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3073 ioa_cfg->sdt_state = DUMP_OBTAINED;
3074 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3075 return;
3076 }
3077
3078 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3079
3080 if (num_entries > max_num_entries)
3081 num_entries = max_num_entries;
3082
3083 /* Update dump length to the actual data to be copied */
3084 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3085 if (ioa_cfg->sis64)
3086 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3087 else
3088 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3089
3090 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091
3092 for (i = 0; i < num_entries; i++) {
4d4dd706 3093 if (ioa_dump->hdr.len > max_dump_size) {
3094 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3095 break;
3096 }
3097
3098 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3099 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3100 if (ioa_cfg->sis64)
3101 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3102 else {
3103 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3104 end_off = be32_to_cpu(sdt->entry[i].end_token);
3105
3106 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3107 bytes_to_copy = end_off - start_off;
3108 else
3109 valid = 0;
3110 }
3111 if (valid) {
4d4dd706 3112 if (bytes_to_copy > max_dump_size) {
3113 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3114 continue;
3115 }
3116
3117 /* Copy data from adapter to driver buffers */
3118 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3119 bytes_to_copy);
3120
3121 ioa_dump->hdr.len += bytes_copied;
3122
3123 if (bytes_copied != bytes_to_copy) {
3124 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3125 break;
3126 }
3127 }
3128 }
3129 }
3130
3131 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3132
3133 /* Update dump_header */
3134 driver_dump->hdr.len += ioa_dump->hdr.len;
3135 wmb();
3136 ioa_cfg->sdt_state = DUMP_OBTAINED;
3137 LEAVE;
3138}
3139
3140#else
203fa3fe 3141#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3142#endif
3143
3144/**
3145 * ipr_release_dump - Free adapter dump memory
3146 * @kref: kref struct
3147 *
3148 * Return value:
3149 * nothing
3150 **/
3151static void ipr_release_dump(struct kref *kref)
3152{
203fa3fe 3153 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3154 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3155 unsigned long lock_flags = 0;
3156 int i;
3157
3158 ENTER;
3159 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3160 ioa_cfg->dump = NULL;
3161 ioa_cfg->sdt_state = INACTIVE;
3162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3163
3164 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3165 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3166
4d4dd706 3167 vfree(dump->ioa_dump.ioa_data);
3168 kfree(dump);
3169 LEAVE;
3170}
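
The dump buffer is reference counted: readers take a kref before dropping the host lock, and only the final kref_put runs this release function, which detaches the dump from ioa_cfg under the lock before freeing its pages. A minimal sketch of the same last-put-frees idiom using C11 atomics (structure contents simplified):

#include <stdatomic.h>
#include <stdlib.h>

struct dump {
	atomic_int refs;	/* plays the role of the kref */
	void *pages;
};

void dump_get(struct dump *d)
{
	atomic_fetch_add(&d->refs, 1);
}

/* The last put frees, as kref_put(&dump->kref, ipr_release_dump) does. */
void dump_put(struct dump *d)
{
	if (atomic_fetch_sub(&d->refs, 1) == 1) {
		free(d->pages);
		free(d);
	}
}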
3171
3172/**
3173 * ipr_worker_thread - Worker thread
c4028958 3174 * @work: ioa config struct
3175 *
3176 * Called at task level from a work thread. This function takes care
3177 * of adding and removing device from the mid-layer as configuration
3178 * changes are detected by the adapter.
3179 *
3180 * Return value:
3181 * nothing
3182 **/
c4028958 3183static void ipr_worker_thread(struct work_struct *work)
3184{
3185 unsigned long lock_flags;
3186 struct ipr_resource_entry *res;
3187 struct scsi_device *sdev;
3188 struct ipr_dump *dump;
3189 struct ipr_ioa_cfg *ioa_cfg =
3190 container_of(work, struct ipr_ioa_cfg, work_q);
3191 u8 bus, target, lun;
3192 int did_work;
3193
3194 ENTER;
3195 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3196
41e9a696 3197 if (ioa_cfg->sdt_state == READ_DUMP) {
3198 dump = ioa_cfg->dump;
3199 if (!dump) {
3200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3201 return;
3202 }
3203 kref_get(&dump->kref);
3204 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3205 ipr_get_ioa_dump(ioa_cfg, dump);
3206 kref_put(&dump->kref, ipr_release_dump);
3207
3208 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3209 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3210 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3212 return;
3213 }
3214
3215restart:
3216 do {
3217 did_work = 0;
56d6aa33 3218 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3219 !ioa_cfg->allow_ml_add_del) {
3220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221 return;
3222 }
3223
3224 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3225 if (res->del_from_ml && res->sdev) {
3226 did_work = 1;
3227 sdev = res->sdev;
3228 if (!scsi_device_get(sdev)) {
3229 if (!res->add_to_ml)
3230 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3231 else
3232 res->del_from_ml = 0;
3233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3234 scsi_remove_device(sdev);
3235 scsi_device_put(sdev);
3236 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3237 }
3238 break;
3239 }
3240 }
203fa3fe 3241 } while (did_work);
1da177e4
LT
3242
3243 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3244 if (res->add_to_ml) {
3e7ebdfa
WB
3245 bus = res->bus;
3246 target = res->target;
3247 lun = res->lun;
1121b794 3248 res->add_to_ml = 0;
1da177e4
LT
3249 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3250 scsi_add_device(ioa_cfg->host, bus, target, lun);
3251 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3252 goto restart;
3253 }
3254 }
3255
3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3257 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3258 LEAVE;
3259}
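/*
 * Note on the locking pattern in ipr_worker_thread() above:
 * scsi_remove_device() and scsi_add_device() can sleep, so neither may
 * be called while holding host_lock.  The worker therefore snapshots
 * what it needs (an sdev reference, or bus/target/lun), drops the
 * lock, makes the blocking mid-layer call, then retakes the lock and
 * restarts its scan, since used_res_q may have changed while the lock
 * was dropped.
 */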
3260
3261#ifdef CONFIG_SCSI_IPR_TRACE
3262/**
3263 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3264 * @filp: open sysfs file
1da177e4 3265 * @kobj: kobject struct
91a69029 3266 * @bin_attr: bin_attribute struct
1da177e4
LT
3267 * @buf: buffer
3268 * @off: offset
3269 * @count: buffer size
3270 *
3271 * Return value:
3272 * number of bytes printed to buffer
3273 **/
2c3c8bea 3274static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
91a69029
ZR
3275 struct bin_attribute *bin_attr,
3276 char *buf, loff_t off, size_t count)
1da177e4 3277{
ee959b00
TJ
3278 struct device *dev = container_of(kobj, struct device, kobj);
3279 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3280 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3281 unsigned long lock_flags = 0;
d777aaf3 3282 ssize_t ret;
1da177e4
LT
3283
3284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3285 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3286 IPR_TRACE_SIZE);
1da177e4 3287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3288
3289 return ret;
1da177e4
LT
3290}
3291
3292static struct bin_attribute ipr_trace_attr = {
3293 .attr = {
3294 .name = "trace",
3295 .mode = S_IRUGO,
3296 },
3297 .size = 0,
3298 .read = ipr_read_trace,
3299};
3300#endif
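/*
 * Illustrative userspace sketch (not part of ipr.c): reading the
 * "trace" binary attribute registered above.  The sysfs path and host
 * number are examples only.
 */
#if 0	/* example only; builds as a standalone userspace program */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* raw trace entries */
	close(fd);
	return 0;
}
#endif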
3301
3302/**
3303 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3304 * @dev: class device struct
3305 * @buf: buffer
1da177e4
LT
3306 *
3307 * Return value:
3308 * number of bytes printed to buffer
3309 **/
ee959b00
TJ
3310static ssize_t ipr_show_fw_version(struct device *dev,
3311 struct device_attribute *attr, char *buf)
1da177e4 3312{
ee959b00 3313 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3314 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3315 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3316 unsigned long lock_flags = 0;
3317 int len;
3318
3319 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3320 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3321 ucode_vpd->major_release, ucode_vpd->card_type,
3322 ucode_vpd->minor_release[0],
3323 ucode_vpd->minor_release[1]);
3324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3325 return len;
3326}
3327
ee959b00 3328static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3329 .attr = {
3330 .name = "fw_version",
3331 .mode = S_IRUGO,
3332 },
3333 .show = ipr_show_fw_version,
3334};
3335
3336/**
3337 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3338 * @dev: class device struct
3339 * @buf: buffer
1da177e4
LT
3340 *
3341 * Return value:
3342 * number of bytes printed to buffer
3343 **/
ee959b00
TJ
3344static ssize_t ipr_show_log_level(struct device *dev,
3345 struct device_attribute *attr, char *buf)
1da177e4 3346{
ee959b00 3347 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3348 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3349 unsigned long lock_flags = 0;
3350 int len;
3351
3352 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3353 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3355 return len;
3356}
3357
3358/**
3359 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3360 * @dev: class device struct
3361 * @buf: buffer
1da177e4
LT
3362 *
3363 * Return value:
3364 * number of bytes printed to buffer
3365 **/
ee959b00 3366static ssize_t ipr_store_log_level(struct device *dev,
203fa3fe 3367 struct device_attribute *attr,
1da177e4
LT
3368 const char *buf, size_t count)
3369{
ee959b00 3370 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3371 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3372 unsigned long lock_flags = 0;
3373
3374 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3375 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377 return strlen(buf);
3378}
3379
ee959b00 3380static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3381 .attr = {
3382 .name = "log_level",
3383 .mode = S_IRUGO | S_IWUSR,
3384 },
3385 .show = ipr_show_log_level,
3386 .store = ipr_store_log_level
3387};
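/*
 * Usage note (illustrative): log_level is a plain decimal sysfs
 * attribute, so it can be inspected and raised from userspace by
 * reading and writing /sys/class/scsi_host/hostN/log_level (host
 * number assumed).  The store above parses with simple_strtoul(), so
 * a trailing newline from echo is ignored.
 */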
3388
3389/**
3390 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3391 * @dev: device struct
3392 * @buf: buffer
3393 * @count: buffer size
1da177e4
LT
3394 *
3395 * This function will reset the adapter and wait a reasonable
3396 * amount of time for any errors that the adapter might log.
3397 *
3398 * Return value:
3399 * count on success / other on failure
3400 **/
ee959b00
TJ
3401static ssize_t ipr_store_diagnostics(struct device *dev,
3402 struct device_attribute *attr,
1da177e4
LT
3403 const char *buf, size_t count)
3404{
ee959b00 3405 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3407 unsigned long lock_flags = 0;
3408 int rc = count;
3409
3410 if (!capable(CAP_SYS_ADMIN))
3411 return -EACCES;
3412
1da177e4 3413 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3414 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3416 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3417 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3418 }
3419
1da177e4
LT
3420 ioa_cfg->errors_logged = 0;
3421 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3422
3423 if (ioa_cfg->in_reset_reload) {
3424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3426
3427 /* Wait for a second for any errors to be logged */
3428 msleep(1000);
3429 } else {
3430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3431 return -EIO;
3432 }
3433
3434 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3435 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3436 rc = -EIO;
3437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3438
3439 return rc;
3440}
3441
ee959b00 3442static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3443 .attr = {
3444 .name = "run_diagnostics",
3445 .mode = S_IWUSR,
3446 },
3447 .store = ipr_store_diagnostics
3448};
3449
f37eb54b
BK
3450/**
3451 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
3452 * @dev: device struct
3453 * @buf: buffer
f37eb54b
BK
3454 *
3455 * Return value:
3456 * number of bytes printed to buffer
3457 **/
ee959b00
TJ
3458static ssize_t ipr_show_adapter_state(struct device *dev,
3459 struct device_attribute *attr, char *buf)
f37eb54b 3460{
ee959b00 3461 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463 unsigned long lock_flags = 0;
3464 int len;
3465
3466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3467 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
f37eb54b
BK
3468 len = snprintf(buf, PAGE_SIZE, "offline\n");
3469 else
3470 len = snprintf(buf, PAGE_SIZE, "online\n");
3471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3472 return len;
3473}
3474
3475/**
3476 * ipr_store_adapter_state - Change adapter state
ee959b00
TJ
3477 * @dev: device struct
3478 * @buf: buffer
3479 * @count: buffer size
f37eb54b
BK
3480 *
3481 * This function will change the adapter's state.
3482 *
3483 * Return value:
3484 * count on success / other on failure
3485 **/
ee959b00
TJ
3486static ssize_t ipr_store_adapter_state(struct device *dev,
3487 struct device_attribute *attr,
f37eb54b
BK
3488 const char *buf, size_t count)
3489{
ee959b00 3490 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3491 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3492 unsigned long lock_flags;
56d6aa33 3493 int result = count, i;
f37eb54b
BK
3494
3495 if (!capable(CAP_SYS_ADMIN))
3496 return -EACCES;
3497
3498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3499 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3500 !strncmp(buf, "online", 6)) {
3501 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3502 spin_lock(&ioa_cfg->hrrq[i]._lock);
3503 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3504 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3505 }
3506 wmb();
f37eb54b
BK
3507 ioa_cfg->reset_retries = 0;
3508 ioa_cfg->in_ioa_bringdown = 0;
3509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3510 }
3511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3512 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3513
3514 return result;
3515}
3516
ee959b00 3517static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3518 .attr = {
49dd0961 3519 .name = "online_state",
f37eb54b
BK
3520 .mode = S_IRUGO | S_IWUSR,
3521 },
3522 .show = ipr_show_adapter_state,
3523 .store = ipr_store_adapter_state
3524};
3525
1da177e4
LT
3526/**
3527 * ipr_store_reset_adapter - Reset the adapter
ee959b00
TJ
3528 * @dev: device struct
3529 * @buf: buffer
3530 * @count: buffer size
1da177e4
LT
3531 *
3532 * This function will reset the adapter.
3533 *
3534 * Return value:
3535 * count on success / other on failure
3536 **/
ee959b00
TJ
3537static ssize_t ipr_store_reset_adapter(struct device *dev,
3538 struct device_attribute *attr,
1da177e4
LT
3539 const char *buf, size_t count)
3540{
ee959b00 3541 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3542 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3543 unsigned long lock_flags;
3544 int result = count;
3545
3546 if (!capable(CAP_SYS_ADMIN))
3547 return -EACCES;
3548
3549 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3550 if (!ioa_cfg->in_reset_reload)
3551 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3552 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3553 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3554
3555 return result;
3556}
3557
ee959b00 3558static struct device_attribute ipr_ioa_reset_attr = {
1da177e4
LT
3559 .attr = {
3560 .name = "reset_host",
3561 .mode = S_IWUSR,
3562 },
3563 .store = ipr_store_reset_adapter
3564};
3565
b53d124a 3566static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3567/**
3568 * ipr_show_iopoll_weight - Show ipr polling mode
3569 * @dev: class device struct
3570 * @buf: buffer
3571 *
3572 * Return value:
3573 * number of bytes printed to buffer
3574 **/
3575static ssize_t ipr_show_iopoll_weight(struct device *dev,
3576 struct device_attribute *attr, char *buf)
3577{
3578 struct Scsi_Host *shost = class_to_shost(dev);
3579 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3580 unsigned long lock_flags = 0;
3581 int len;
3582
3583 spin_lock_irqsave(shost->host_lock, lock_flags);
3584 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3585 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3586
3587 return len;
3588}
3589
3590/**
3591 * ipr_store_iopoll_weight - Change the adapter's polling mode
3592 * @dev: class device struct
3593 * @buf: buffer
 * @count: buffer size
3594 *
3595 * Return value:
3596 *	count on success / other on failure
3597 **/
3598static ssize_t ipr_store_iopoll_weight(struct device *dev,
3599 struct device_attribute *attr,
3600 const char *buf, size_t count)
3601{
3602 struct Scsi_Host *shost = class_to_shost(dev);
3603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3604 unsigned long user_iopoll_weight;
3605 unsigned long lock_flags = 0;
3606 int i;
3607
3608 if (!ioa_cfg->sis64) {
3609 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3610 return -EINVAL;
3611 }
3612 if (kstrtoul(buf, 10, &user_iopoll_weight))
3613 return -EINVAL;
3614
3615 if (user_iopoll_weight > 256) {
3616 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3617 return -EINVAL;
3618 }
3619
3620 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3621 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight unchanged; it already has that value\n");
3622 return strlen(buf);
3623 }
3624
3625 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3626 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3627 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3628 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3629 }
3630
3631 spin_lock_irqsave(shost->host_lock, lock_flags);
3632 ioa_cfg->iopoll_weight = user_iopoll_weight;
3633 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3634 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3635 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3636 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3637 ioa_cfg->iopoll_weight, ipr_iopoll);
3638 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3639 }
3640 }
3641 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3642
3643 return strlen(buf);
3644}
3645
3646static struct device_attribute ipr_iopoll_weight_attr = {
3647 .attr = {
3648 .name = "iopoll_weight",
3649 .mode = S_IRUGO | S_IWUSR,
3650 },
3651 .show = ipr_show_iopoll_weight,
3652 .store = ipr_store_iopoll_weight
3653};
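/*
 * Usage note (illustrative): a weight of 0 leaves completions
 * interrupt driven; values up to and including 256 set the blk-iopoll
 * budget.  As the checks above show, polling only takes effect on
 * SIS-64 adapters configured with more than one interrupt vector.
 */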
3654
1da177e4
LT
3655/**
3656 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3657 * @buf_len: buffer length
3658 *
3659 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3660 * list to use for microcode download
3661 *
3662 * Return value:
3663 * pointer to sglist / NULL on failure
3664 **/
3665static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3666{
3667 int sg_size, order, bsize_elem, num_elem, i, j;
3668 struct ipr_sglist *sglist;
3669 struct scatterlist *scatterlist;
3670 struct page *page;
3671
3672 /* Get the minimum size per scatter/gather element */
3673 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3674
3675 /* Get the actual size per element */
3676 order = get_order(sg_size);
3677
3678 /* Determine the actual number of bytes per element */
3679 bsize_elem = PAGE_SIZE * (1 << order);
3680
3681 /* Determine the actual number of sg entries needed */
3682 if (buf_len % bsize_elem)
3683 num_elem = (buf_len / bsize_elem) + 1;
3684 else
3685 num_elem = buf_len / bsize_elem;
3686
3687 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3688 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
3689 (sizeof(struct scatterlist) * (num_elem - 1)),
3690 GFP_KERNEL);
3691
3692 if (sglist == NULL) {
3693 ipr_trace;
3694 return NULL;
3695 }
3696
1da177e4 3697 scatterlist = sglist->scatterlist;
45711f1a 3698 sg_init_table(scatterlist, num_elem);
1da177e4
LT
3699
3700 sglist->order = order;
3701 sglist->num_sg = num_elem;
3702
3703 /* Allocate a bunch of sg elements */
3704 for (i = 0; i < num_elem; i++) {
3705 page = alloc_pages(GFP_KERNEL, order);
3706 if (!page) {
3707 ipr_trace;
3708
3709 /* Free up what we already allocated */
3710 for (j = i - 1; j >= 0; j--)
45711f1a 3711 __free_pages(sg_page(&scatterlist[j]), order);
1da177e4
LT
3712 kfree(sglist);
3713 return NULL;
3714 }
3715
642f1490 3716 sg_set_page(&scatterlist[i], page, 0, 0);
1da177e4
LT
3717 }
3718
3719 return sglist;
3720}
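/*
 * Worked example of the sizing math above (illustrative; assumes
 * 4 KiB pages and IPR_MAX_SGLIST == 64).  For a 614400 byte (600 KiB)
 * microcode image:
 *
 *   sg_size    = 614400 / 63          = 9752 bytes minimum per element
 *   order      = get_order(9752)      = 2   (16 KiB allocations)
 *   bsize_elem = 4096 << 2            = 16384 bytes per element
 *   num_elem   = 614400 / 16384 + 1   = 38  (8192 bytes spill into
 *                                            the final element)
 */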
3721
3722/**
3723 * ipr_free_ucode_buffer - Frees a microcode download buffer
3724 * @sglist: scatter/gather list pointer
3725 *
3726 * Free a DMA'able ucode download buffer previously allocated with
3727 * ipr_alloc_ucode_buffer
3728 *
3729 * Return value:
3730 * nothing
3731 **/
3732static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3733{
3734 int i;
3735
3736 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3737 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
1da177e4
LT
3738
3739 kfree(sglist);
3740}
3741
3742/**
3743 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3744 * @sglist: scatter/gather list pointer
3745 * @buffer: buffer pointer
3746 * @len: buffer length
3747 *
3748 * Copy a microcode image from a user buffer into a buffer allocated by
3749 * ipr_alloc_ucode_buffer
3750 *
3751 * Return value:
3752 * 0 on success / other on failure
3753 **/
3754static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3755 u8 *buffer, u32 len)
3756{
3757 int bsize_elem, i, result = 0;
3758 struct scatterlist *scatterlist;
3759 void *kaddr;
3760
3761 /* Determine the actual number of bytes per element */
3762 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3763
3764 scatterlist = sglist->scatterlist;
3765
3766 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
45711f1a
JA
3767 struct page *page = sg_page(&scatterlist[i]);
3768
3769 kaddr = kmap(page);
1da177e4 3770 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3771 kunmap(page);
1da177e4
LT
3772
3773 scatterlist[i].length = bsize_elem;
3774
3775 if (result != 0) {
3776 ipr_trace;
3777 return result;
3778 }
3779 }
3780
3781 if (len % bsize_elem) {
45711f1a
JA
3782 struct page *page = sg_page(&scatterlist[i]);
3783
3784 kaddr = kmap(page);
1da177e4 3785 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3786 kunmap(page);
1da177e4
LT
3787
3788 scatterlist[i].length = len % bsize_elem;
3789 }
3790
3791 sglist->buffer_len = len;
3792 return result;
3793}
3794
a32c055f
WB
3795/**
3796 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3797 * @ipr_cmd: ipr command struct
3798 * @sglist: scatter/gather list
3799 *
3800 * Builds a microcode download IOA data list (IOADL).
3801 *
3802 **/
3803static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3804 struct ipr_sglist *sglist)
3805{
3806 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3807 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3808 struct scatterlist *scatterlist = sglist->scatterlist;
3809 int i;
3810
3811 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3812 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3813 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3814
3815 ioarcb->ioadl_len =
3816 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3817 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3818 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3819 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3820 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3821 }
3822
3823 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3824}
3825
1da177e4 3826/**
12baa420 3827 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
3828 * @ipr_cmd: ipr command struct
3829 * @sglist: scatter/gather list
1da177e4 3830 *
12baa420 3831 * Builds a microcode download IOA data list (IOADL).
1da177e4 3832 *
1da177e4 3833 **/
12baa420
BK
3834static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3835 struct ipr_sglist *sglist)
1da177e4 3836{
1da177e4 3837 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3838 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4
LT
3839 struct scatterlist *scatterlist = sglist->scatterlist;
3840 int i;
3841
12baa420 3842 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3843 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
3844 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3845
3846 ioarcb->ioadl_len =
1da177e4
LT
3847 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3848
3849 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3850 ioadl[i].flags_and_data_len =
3851 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3852 ioadl[i].address =
3853 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3854 }
3855
12baa420
BK
3856 ioadl[i-1].flags_and_data_len |=
3857 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3858}
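/*
 * Note: the two IOADL builders above differ only in descriptor
 * layout.  The SIS-64 form carries a 64-bit DMA address with separate
 * 32-bit flags and length fields, while the legacy form packs flags
 * and length into a single 32-bit word beside a 32-bit address.  Both
 * tag the final descriptor with IPR_IOADL_FLAGS_LAST so the adapter
 * knows where the list ends.
 */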
3859
3860/**
3861 * ipr_update_ioa_ucode - Update IOA's microcode
3862 * @ioa_cfg: ioa config struct
3863 * @sglist: scatter/gather list
3864 *
3865 * Initiate an adapter reset to update the IOA's microcode
3866 *
3867 * Return value:
3868 * 0 on success / -EIO on failure
3869 **/
3870static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3871 struct ipr_sglist *sglist)
3872{
3873 unsigned long lock_flags;
3874
3875 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3876 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3877 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3878 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3879 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3880 }
12baa420
BK
3881
3882 if (ioa_cfg->ucode_sglist) {
3883 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3884 dev_err(&ioa_cfg->pdev->dev,
3885 "Microcode download already in progress\n");
3886 return -EIO;
1da177e4 3887 }
12baa420
BK
3888
3889 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3890 sglist->num_sg, DMA_TO_DEVICE);
3891
3892 if (!sglist->num_dma_sg) {
3893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3894 dev_err(&ioa_cfg->pdev->dev,
3895 "Failed to map microcode download buffer!\n");
1da177e4
LT
3896 return -EIO;
3897 }
3898
12baa420
BK
3899 ioa_cfg->ucode_sglist = sglist;
3900 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3901 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3902 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3903
3904 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3905 ioa_cfg->ucode_sglist = NULL;
3906 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3907 return 0;
3908}
3909
3910/**
3911 * ipr_store_update_fw - Update the firmware on the adapter
ee959b00
TJ
3912 * @dev: device struct
3913 * @buf: buffer
3914 * @count: buffer size
1da177e4
LT
3915 *
3916 * This function will update the firmware on the adapter.
3917 *
3918 * Return value:
3919 * count on success / other on failure
3920 **/
ee959b00
TJ
3921static ssize_t ipr_store_update_fw(struct device *dev,
3922 struct device_attribute *attr,
3923 const char *buf, size_t count)
1da177e4 3924{
ee959b00 3925 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3926 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3927 struct ipr_ucode_image_header *image_hdr;
3928 const struct firmware *fw_entry;
3929 struct ipr_sglist *sglist;
1da177e4
LT
3930 char fname[100];
3931 char *src;
3932 int len, result, dnld_size;
3933
3934 if (!capable(CAP_SYS_ADMIN))
3935 return -EACCES;
3936
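	/* copy the name; the final character (assumed '\n') is stripped below */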
3937 len = snprintf(fname, 99, "%s", buf);
3938 fname[len-1] = '\0';
3939
203fa3fe 3940 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
1da177e4
LT
3941 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3942 return -EIO;
3943 }
3944
3945 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3946
1da177e4
LT
3947 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3948 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3949 sglist = ipr_alloc_ucode_buffer(dnld_size);
3950
3951 if (!sglist) {
3952 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3953 release_firmware(fw_entry);
3954 return -ENOMEM;
3955 }
3956
3957 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3958
3959 if (result) {
3960 dev_err(&ioa_cfg->pdev->dev,
3961 "Microcode buffer copy to DMA buffer failed\n");
12baa420 3962 goto out;
1da177e4
LT
3963 }
3964
14ed9cc7
WB
3965 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3966
12baa420 3967 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 3968
12baa420
BK
3969 if (!result)
3970 result = count;
3971out:
1da177e4
LT
3972 ipr_free_ucode_buffer(sglist);
3973 release_firmware(fw_entry);
12baa420 3974 return result;
1da177e4
LT
3975}
3976
ee959b00 3977static struct device_attribute ipr_update_fw_attr = {
1da177e4
LT
3978 .attr = {
3979 .name = "update_fw",
3980 .mode = S_IWUSR,
3981 },
3982 .store = ipr_store_update_fw
3983};
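/*
 * Illustrative userspace sketch (not part of ipr.c): driving the
 * "update_fw" attribute above.  request_firmware() resolves the name
 * it is given against the firmware search path (typically
 * /lib/firmware), so the image must be copied there first.  The host
 * number and file name are examples only.
 */
#if 0	/* example only; builds as a standalone userspace program */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/update_fw", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* name of an image previously copied to /lib/firmware */
	fprintf(f, "ibm-adapter-ucode.bin\n");
	return fclose(f) ? 1 : 0;
}
#endif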
3984
75576bb9
WB
3985/**
3986 * ipr_show_fw_type - Show the adapter's firmware type.
3987 * @dev: class device struct
3988 * @buf: buffer
3989 *
3990 * Return value:
3991 * number of bytes printed to buffer
3992 **/
3993static ssize_t ipr_show_fw_type(struct device *dev,
3994 struct device_attribute *attr, char *buf)
3995{
3996 struct Scsi_Host *shost = class_to_shost(dev);
3997 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3998 unsigned long lock_flags = 0;
3999 int len;
4000
4001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4002 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4003 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4004 return len;
4005}
4006
4007static struct device_attribute ipr_ioa_fw_type_attr = {
4008 .attr = {
4009 .name = "fw_type",
4010 .mode = S_IRUGO,
4011 },
4012 .show = ipr_show_fw_type
4013};
4014
ee959b00 4015static struct device_attribute *ipr_ioa_attrs[] = {
1da177e4
LT
4016 &ipr_fw_version_attr,
4017 &ipr_log_level_attr,
4018 &ipr_diagnostics_attr,
f37eb54b 4019 &ipr_ioa_state_attr,
1da177e4
LT
4020 &ipr_ioa_reset_attr,
4021 &ipr_update_fw_attr,
75576bb9 4022 &ipr_ioa_fw_type_attr,
b53d124a 4023 &ipr_iopoll_weight_attr,
1da177e4
LT
4024 NULL,
4025};
4026
4027#ifdef CONFIG_SCSI_IPR_DUMP
4028/**
4029 * ipr_read_dump - Dump the adapter
2c3c8bea 4030 * @filp: open sysfs file
1da177e4 4031 * @kobj: kobject struct
91a69029 4032 * @bin_attr: bin_attribute struct
1da177e4
LT
4033 * @buf: buffer
4034 * @off: offset
4035 * @count: buffer size
4036 *
4037 * Return value:
4038 * number of bytes printed to buffer
4039 **/
2c3c8bea 4040static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4041 struct bin_attribute *bin_attr,
4042 char *buf, loff_t off, size_t count)
1da177e4 4043{
ee959b00 4044 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4045 struct Scsi_Host *shost = class_to_shost(cdev);
4046 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4047 struct ipr_dump *dump;
4048 unsigned long lock_flags = 0;
4049 char *src;
4d4dd706 4050 int len, sdt_end;
1da177e4
LT
4051 size_t rc = count;
4052
4053 if (!capable(CAP_SYS_ADMIN))
4054 return -EACCES;
4055
4056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057 dump = ioa_cfg->dump;
4058
4059 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4060 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4061 return 0;
4062 }
4063 kref_get(&dump->kref);
4064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4065
4066 if (off > dump->driver_dump.hdr.len) {
4067 kref_put(&dump->kref, ipr_release_dump);
4068 return 0;
4069 }
4070
4071 if (off + count > dump->driver_dump.hdr.len) {
4072 count = dump->driver_dump.hdr.len - off;
4073 rc = count;
4074 }
4075
4076 if (count && off < sizeof(dump->driver_dump)) {
4077 if (off + count > sizeof(dump->driver_dump))
4078 len = sizeof(dump->driver_dump) - off;
4079 else
4080 len = count;
4081 src = (u8 *)&dump->driver_dump + off;
4082 memcpy(buf, src, len);
4083 buf += len;
4084 off += len;
4085 count -= len;
4086 }
4087
4088 off -= sizeof(dump->driver_dump);
4089
4d4dd706
KSS
4090 if (ioa_cfg->sis64)
4091 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4092 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4093 sizeof(struct ipr_sdt_entry));
4094 else
4095 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4096 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4097
4098 if (count && off < sdt_end) {
4099 if (off + count > sdt_end)
4100 len = sdt_end - off;
1da177e4
LT
4101 else
4102 len = count;
4103 src = (u8 *)&dump->ioa_dump + off;
4104 memcpy(buf, src, len);
4105 buf += len;
4106 off += len;
4107 count -= len;
4108 }
4109
4d4dd706 4110 off -= sdt_end;
1da177e4
LT
4111
4112 while (count) {
4113 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4114 len = PAGE_ALIGN(off) - off;
4115 else
4116 len = count;
4117 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4118 src += off & ~PAGE_MASK;
4119 memcpy(buf, src, len);
4120 buf += len;
4121 off += len;
4122 count -= len;
4123 }
4124
4125 kref_put(&dump->kref, ipr_release_dump);
4126 return rc;
4127}
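/*
 * Layout of the dump image as assembled by ipr_read_dump() above, in
 * increasing file offset order:
 *
 *   0 ..                    struct ipr_driver_dump (driver headers)
 *   sizeof(driver_dump) ..  SDT header plus entries, sized per the
 *                           adapter format (num_entries_used entries
 *                           on SIS-64, IPR_FMT2_NUM_SDT_ENTRIES
 *                           otherwise)
 *   sdt_end ..              captured IOA data, one PAGE_SIZE chunk
 *                           per ioa_data[] slot
 *
 * Reads that straddle a region boundary are satisfied piecewise,
 * which is why off is rebased after each region.
 */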
4128
4129/**
4130 * ipr_alloc_dump - Prepare for adapter dump
4131 * @ioa_cfg: ioa config struct
4132 *
4133 * Return value:
4134 * 0 on success / other on failure
4135 **/
4136static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4137{
4138 struct ipr_dump *dump;
4d4dd706 4139 __be32 **ioa_data;
1da177e4
LT
4140 unsigned long lock_flags = 0;
4141
0bc42e35 4142 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
4143
4144 if (!dump) {
4145 ipr_err("Dump memory allocation failed\n");
4146 return -ENOMEM;
4147 }
4148
4d4dd706
KSS
4149 if (ioa_cfg->sis64)
4150 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4151 else
4152 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4153
4154 if (!ioa_data) {
4155 ipr_err("Dump memory allocation failed\n");
4156 kfree(dump);
4157 return -ENOMEM;
4158 }
4159
4160 dump->ioa_dump.ioa_data = ioa_data;
4161
1da177e4
LT
4162 kref_init(&dump->kref);
4163 dump->ioa_cfg = ioa_cfg;
4164
4165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4166
4167 if (INACTIVE != ioa_cfg->sdt_state) {
4168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 4169 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
4170 kfree(dump);
4171 return 0;
4172 }
4173
4174 ioa_cfg->dump = dump;
4175 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
56d6aa33 4176 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
1da177e4
LT
4177 ioa_cfg->dump_taken = 1;
4178 schedule_work(&ioa_cfg->work_q);
4179 }
4180 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4181
1da177e4
LT
4182 return 0;
4183}
4184
4185/**
4186 * ipr_free_dump - Free adapter dump memory
4187 * @ioa_cfg: ioa config struct
4188 *
4189 * Return value:
4190 * 0 on success / other on failure
4191 **/
4192static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4193{
4194 struct ipr_dump *dump;
4195 unsigned long lock_flags = 0;
4196
4197 ENTER;
4198
4199 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4200 dump = ioa_cfg->dump;
4201 if (!dump) {
4202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4203 return 0;
4204 }
4205
4206 ioa_cfg->dump = NULL;
4207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4208
4209 kref_put(&dump->kref, ipr_release_dump);
4210
4211 LEAVE;
4212 return 0;
4213}
4214
4215/**
4216 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4217 * @filp: open sysfs file
1da177e4 4218 * @kobj: kobject struct
91a69029 4219 * @bin_attr: bin_attribute struct
1da177e4
LT
4220 * @buf: buffer
4221 * @off: offset
4222 * @count: buffer size
4223 *
4224 * Return value:
4225 *	count on success / other on failure
4226 **/
2c3c8bea 4227static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4228 struct bin_attribute *bin_attr,
4229 char *buf, loff_t off, size_t count)
1da177e4 4230{
ee959b00 4231 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4232 struct Scsi_Host *shost = class_to_shost(cdev);
4233 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4234 int rc;
4235
4236 if (!capable(CAP_SYS_ADMIN))
4237 return -EACCES;
4238
4239 if (buf[0] == '1')
4240 rc = ipr_alloc_dump(ioa_cfg);
4241 else if (buf[0] == '0')
4242 rc = ipr_free_dump(ioa_cfg);
4243 else
4244 return -EINVAL;
4245
4246 if (rc)
4247 return rc;
4248 else
4249 return count;
4250}
4251
4252static struct bin_attribute ipr_dump_attr = {
4253 .attr = {
4254 .name = "dump",
4255 .mode = S_IRUSR | S_IWUSR,
4256 },
4257 .size = 0,
4258 .read = ipr_read_dump,
4259 .write = ipr_write_dump
4260};
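/*
 * Usage note (illustrative): writing '1' to the "dump" attribute
 * allocates the dump tracking structures and arms collection, writing
 * '0' frees them, and reading the file returns the dump contents.
 * Reads return 0 bytes until sdt_state reaches DUMP_OBTAINED.
 */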
4261#else
4262static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4263#endif
4264
4265/**
4266 * ipr_change_queue_depth - Change the device's queue depth
4267 * @sdev: scsi device struct
4268 * @qdepth: depth to set
e881a172 4269 * @reason: calling context
1da177e4
LT
4270 *
4271 * Return value:
4272 * actual depth set
4273 **/
e881a172
MC
4274static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4275 int reason)
1da177e4 4276{
35a39691
BK
4277 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4278 struct ipr_resource_entry *res;
4279 unsigned long lock_flags = 0;
4280
e881a172
MC
4281 if (reason != SCSI_QDEPTH_DEFAULT)
4282 return -EOPNOTSUPP;
4283
35a39691
BK
4284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4285 res = (struct ipr_resource_entry *)sdev->hostdata;
4286
4287 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4288 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4290
1da177e4
LT
4291 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4292 return sdev->queue_depth;
4293}
4294
4295/**
4296 * ipr_change_queue_type - Change the device's queue type
4297 * @sdev: scsi device struct
4298 * @tag_type: type of tags to use
4299 *
4300 * Return value:
4301 * actual queue type set
4302 **/
4303static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4304{
4305 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4306 struct ipr_resource_entry *res;
4307 unsigned long lock_flags = 0;
4308
4309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4310 res = (struct ipr_resource_entry *)sdev->hostdata;
4311
4312 if (res) {
4313 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4314 /*
4315 * We don't bother quiescing the device here since the
4316 * adapter firmware does it for us.
4317 */
4318 scsi_set_tag_type(sdev, tag_type);
4319
4320 if (tag_type)
4321 scsi_activate_tcq(sdev, sdev->queue_depth);
4322 else
4323 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4324 } else
4325 tag_type = 0;
4326 } else
4327 tag_type = 0;
4328
4329 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4330 return tag_type;
4331}
4332
4333/**
4334 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4335 * @dev: device struct
46d74563 4336 * @attr: device attribute structure
1da177e4
LT
4337 * @buf: buffer
4338 *
4339 * Return value:
4340 * number of bytes printed to buffer
4341 **/
10523b3b 4342static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
4343{
4344 struct scsi_device *sdev = to_scsi_device(dev);
4345 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4346 struct ipr_resource_entry *res;
4347 unsigned long lock_flags = 0;
4348 ssize_t len = -ENXIO;
4349
4350 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4351 res = (struct ipr_resource_entry *)sdev->hostdata;
4352 if (res)
3e7ebdfa 4353 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4355 return len;
4356}
4357
4358static struct device_attribute ipr_adapter_handle_attr = {
4359 .attr = {
4360 .name = "adapter_handle",
4361 .mode = S_IRUSR,
4362 },
4363 .show = ipr_show_adapter_handle
4364};
4365
3e7ebdfa 4366/**
5adcbeb3
WB
4367 * ipr_show_resource_path - Show the resource path or the resource address for
4368 * this device.
3e7ebdfa 4369 * @dev: device struct
46d74563 4370 * @attr: device attribute structure
3e7ebdfa
WB
4371 * @buf: buffer
4372 *
4373 * Return value:
4374 * number of bytes printed to buffer
4375 **/
4376static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4377{
4378 struct scsi_device *sdev = to_scsi_device(dev);
4379 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4380 struct ipr_resource_entry *res;
4381 unsigned long lock_flags = 0;
4382 ssize_t len = -ENXIO;
4383 char buffer[IPR_MAX_RES_PATH_LENGTH];
4384
4385 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4386 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4387 if (res && ioa_cfg->sis64)
3e7ebdfa 4388 len = snprintf(buf, PAGE_SIZE, "%s\n",
b3b3b407
BK
4389 __ipr_format_res_path(res->res_path, buffer,
4390 sizeof(buffer)));
5adcbeb3
WB
4391 else if (res)
4392 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4393 res->bus, res->target, res->lun);
4394
3e7ebdfa
WB
4395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4396 return len;
4397}
4398
4399static struct device_attribute ipr_resource_path_attr = {
4400 .attr = {
4401 .name = "resource_path",
75576bb9 4402 .mode = S_IRUGO,
3e7ebdfa
WB
4403 },
4404 .show = ipr_show_resource_path
4405};
4406
46d74563
WB
4407/**
4408 * ipr_show_device_id - Show the device_id for this device.
4409 * @dev: device struct
4410 * @attr: device attribute structure
4411 * @buf: buffer
4412 *
4413 * Return value:
4414 * number of bytes printed to buffer
4415 **/
4416static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4417{
4418 struct scsi_device *sdev = to_scsi_device(dev);
4419 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4420 struct ipr_resource_entry *res;
4421 unsigned long lock_flags = 0;
4422 ssize_t len = -ENXIO;
4423
4424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4425 res = (struct ipr_resource_entry *)sdev->hostdata;
4426 if (res && ioa_cfg->sis64)
4427 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4428 else if (res)
4429 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4430
4431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4432 return len;
4433}
4434
4435static struct device_attribute ipr_device_id_attr = {
4436 .attr = {
4437 .name = "device_id",
4438 .mode = S_IRUGO,
4439 },
4440 .show = ipr_show_device_id
4441};
4442
75576bb9
WB
4443/**
4444 * ipr_show_resource_type - Show the resource type for this device.
4445 * @dev: device struct
46d74563 4446 * @attr: device attribute structure
75576bb9
WB
4447 * @buf: buffer
4448 *
4449 * Return value:
4450 * number of bytes printed to buffer
4451 **/
4452static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4453{
4454 struct scsi_device *sdev = to_scsi_device(dev);
4455 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4456 struct ipr_resource_entry *res;
4457 unsigned long lock_flags = 0;
4458 ssize_t len = -ENXIO;
4459
4460 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4461 res = (struct ipr_resource_entry *)sdev->hostdata;
4462
4463 if (res)
4464 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4465
4466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4467 return len;
4468}
4469
4470static struct device_attribute ipr_resource_type_attr = {
4471 .attr = {
4472 .name = "resource_type",
4473 .mode = S_IRUGO,
4474 },
4475 .show = ipr_show_resource_type
4476};
4477
1da177e4
LT
4478static struct device_attribute *ipr_dev_attrs[] = {
4479 &ipr_adapter_handle_attr,
3e7ebdfa 4480 &ipr_resource_path_attr,
46d74563 4481 &ipr_device_id_attr,
75576bb9 4482 &ipr_resource_type_attr,
1da177e4
LT
4483 NULL,
4484};
4485
4486/**
4487 * ipr_biosparam - Return the HSC mapping
4488 * @sdev: scsi device struct
4489 * @block_device: block device pointer
4490 * @capacity: capacity of the device
4491 * @parm: Array containing returned HSC values.
4492 *
4493 * This function generates the HSC parms that fdisk uses.
4494 * We want to make sure we return something that places partitions
4495 * on 4k boundaries for best performance with the IOA.
4496 *
4497 * Return value:
4498 * 0 on success
4499 **/
4500static int ipr_biosparam(struct scsi_device *sdev,
4501 struct block_device *block_device,
4502 sector_t capacity, int *parm)
4503{
4504 int heads, sectors;
4505 sector_t cylinders;
4506
4507 heads = 128;
4508 sectors = 32;
4509
4510 cylinders = capacity;
4511 sector_div(cylinders, (128 * 32));
4512
4513 /* return result */
4514 parm[0] = heads;
4515 parm[1] = sectors;
4516 parm[2] = cylinders;
4517
4518 return 0;
4519}
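/*
 * Worked example (illustrative, assuming 512 byte sectors): with
 * heads = 128 and sectors = 32, one cylinder spans 128 * 32 = 4096
 * sectors, i.e. 2 MiB.  A 20 GiB disk of 41943040 sectors thus
 * reports 41943040 / 4096 = 10240 cylinders, and any partition that
 * fdisk starts on a cylinder boundary begins at a multiple of 2 MiB,
 * which satisfies the 4k alignment the IOA prefers.
 */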
4520
35a39691
BK
4521/**
4522 * ipr_find_starget - Find target based on bus/target.
4523 * @starget: scsi target struct
4524 *
4525 * Return value:
4526 * resource entry pointer if found / NULL if not found
4527 **/
4528static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4529{
4530 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4532 struct ipr_resource_entry *res;
4533
4534 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4535 if ((res->bus == starget->channel) &&
0ee1d714 4536 (res->target == starget->id)) {
35a39691
BK
4537 return res;
4538 }
4539 }
4540
4541 return NULL;
4542}
4543
4544static struct ata_port_info sata_port_info;
4545
4546/**
4547 * ipr_target_alloc - Prepare for commands to a SCSI target
4548 * @starget: scsi target struct
4549 *
4550 * If the device is a SATA device, this function allocates an
4551 * ATA port with libata, else it does nothing.
4552 *
4553 * Return value:
4554 * 0 on success / non-0 on failure
4555 **/
4556static int ipr_target_alloc(struct scsi_target *starget)
4557{
4558 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4559 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4560 struct ipr_sata_port *sata_port;
4561 struct ata_port *ap;
4562 struct ipr_resource_entry *res;
4563 unsigned long lock_flags;
4564
4565 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4566 res = ipr_find_starget(starget);
4567 starget->hostdata = NULL;
4568
4569 if (res && ipr_is_gata(res)) {
4570 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4571 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4572 if (!sata_port)
4573 return -ENOMEM;
4574
4575 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4576 if (ap) {
4577 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4578 sata_port->ioa_cfg = ioa_cfg;
4579 sata_port->ap = ap;
4580 sata_port->res = res;
4581
4582 res->sata_port = sata_port;
4583 ap->private_data = sata_port;
4584 starget->hostdata = sata_port;
4585 } else {
4586 kfree(sata_port);
4587 return -ENOMEM;
4588 }
4589 }
4590 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4591
4592 return 0;
4593}
4594
4595/**
4596 * ipr_target_destroy - Destroy a SCSI target
4597 * @starget: scsi target struct
4598 *
4599 * If the device was a SATA device, this function frees the libata
4600 * ATA port, else it does nothing.
4601 *
4602 **/
4603static void ipr_target_destroy(struct scsi_target *starget)
4604{
4605 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4606 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4607 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4608
4609 if (ioa_cfg->sis64) {
0ee1d714
BK
4610 if (!ipr_find_starget(starget)) {
4611 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4612 clear_bit(starget->id, ioa_cfg->array_ids);
4613 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4614 clear_bit(starget->id, ioa_cfg->vset_ids);
4615 else if (starget->channel == 0)
4616 clear_bit(starget->id, ioa_cfg->target_ids);
4617 }
3e7ebdfa 4618 }
35a39691
BK
4619
4620 if (sata_port) {
4621 starget->hostdata = NULL;
4622 ata_sas_port_destroy(sata_port->ap);
4623 kfree(sata_port);
4624 }
4625}
4626
4627/**
4628 * ipr_find_sdev - Find device based on bus/target/lun.
4629 * @sdev: scsi device struct
4630 *
4631 * Return value:
4632 * resource entry pointer if found / NULL if not found
4633 **/
4634static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4635{
4636 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4637 struct ipr_resource_entry *res;
4638
4639 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4640 if ((res->bus == sdev->channel) &&
4641 (res->target == sdev->id) &&
4642 (res->lun == sdev->lun))
35a39691
BK
4643 return res;
4644 }
4645
4646 return NULL;
4647}
4648
1da177e4
LT
4649/**
4650 * ipr_slave_destroy - Unconfigure a SCSI device
4651 * @sdev: scsi device struct
4652 *
4653 * Return value:
4654 * nothing
4655 **/
4656static void ipr_slave_destroy(struct scsi_device *sdev)
4657{
4658 struct ipr_resource_entry *res;
4659 struct ipr_ioa_cfg *ioa_cfg;
4660 unsigned long lock_flags = 0;
4661
4662 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4663
4664 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4665 res = (struct ipr_resource_entry *) sdev->hostdata;
4666 if (res) {
35a39691 4667 if (res->sata_port)
3e4ec344 4668 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4669 sdev->hostdata = NULL;
4670 res->sdev = NULL;
35a39691 4671 res->sata_port = NULL;
1da177e4
LT
4672 }
4673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4674}
4675
4676/**
4677 * ipr_slave_configure - Configure a SCSI device
4678 * @sdev: scsi device struct
4679 *
4680 * This function configures the specified scsi device.
4681 *
4682 * Return value:
4683 * 0 on success
4684 **/
4685static int ipr_slave_configure(struct scsi_device *sdev)
4686{
4687 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4688 struct ipr_resource_entry *res;
dd406ef8 4689 struct ata_port *ap = NULL;
1da177e4 4690 unsigned long lock_flags = 0;
3e7ebdfa 4691 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4692
4693 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4694 res = sdev->hostdata;
4695 if (res) {
4696 if (ipr_is_af_dasd_device(res))
4697 sdev->type = TYPE_RAID;
0726ce26 4698 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4699 sdev->scsi_level = 4;
0726ce26
BK
4700 sdev->no_uld_attach = 1;
4701 }
1da177e4 4702 if (ipr_is_vset_device(res)) {
242f9dcb
JA
4703 blk_queue_rq_timeout(sdev->request_queue,
4704 IPR_VSET_RW_TIMEOUT);
086fa5ff 4705 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4706 }
dd406ef8
BK
4707 if (ipr_is_gata(res) && res->sata_port)
4708 ap = res->sata_port->ap;
4709 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4710
4711 if (ap) {
35a39691 4712 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8
BK
4713 ata_sas_slave_configure(sdev, ap);
4714 } else
35a39691 4715 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3e7ebdfa
WB
4716 if (ioa_cfg->sis64)
4717 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
b3b3b407
BK
4718 ipr_format_res_path(ioa_cfg,
4719 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4720 return 0;
1da177e4
LT
4721 }
4722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4723 return 0;
4724}
4725
35a39691
BK
4726/**
4727 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4728 * @sdev: scsi device struct
4729 *
4730 * This function initializes an ATA port so that future commands
4731 * sent through queuecommand will work.
4732 *
4733 * Return value:
4734 * 0 on success
4735 **/
4736static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4737{
4738 struct ipr_sata_port *sata_port = NULL;
4739 int rc = -ENXIO;
4740
4741 ENTER;
4742 if (sdev->sdev_target)
4743 sata_port = sdev->sdev_target->hostdata;
b2024459 4744 if (sata_port) {
35a39691 4745 rc = ata_sas_port_init(sata_port->ap);
b2024459
DW
4746 if (rc == 0)
4747 rc = ata_sas_sync_probe(sata_port->ap);
4748 }
4749
35a39691
BK
4750 if (rc)
4751 ipr_slave_destroy(sdev);
4752
4753 LEAVE;
4754 return rc;
4755}
4756
1da177e4
LT
4757/**
4758 * ipr_slave_alloc - Prepare for commands to a device.
4759 * @sdev: scsi device struct
4760 *
4761 * This function saves a pointer to the resource entry
4762 * in the scsi device struct if the device exists. We
4763 * can then use this pointer in ipr_queuecommand when
4764 * handling new commands.
4765 *
4766 * Return value:
692aebfc 4767 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4768 **/
4769static int ipr_slave_alloc(struct scsi_device *sdev)
4770{
4771 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4772 struct ipr_resource_entry *res;
4773 unsigned long lock_flags;
692aebfc 4774 int rc = -ENXIO;
1da177e4
LT
4775
4776 sdev->hostdata = NULL;
4777
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4779
35a39691
BK
4780 res = ipr_find_sdev(sdev);
4781 if (res) {
4782 res->sdev = sdev;
4783 res->add_to_ml = 0;
4784 res->in_erp = 0;
4785 sdev->hostdata = res;
4786 if (!ipr_is_naca_model(res))
4787 res->needs_sync_complete = 1;
4788 rc = 0;
4789 if (ipr_is_gata(res)) {
4790 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4791 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
4792 }
4793 }
4794
4795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4796
692aebfc 4797 return rc;
1da177e4
LT
4798}
4799
4800/**
4801 * __ipr_eh_host_reset - Reset the host adapter
4802 * @scsi_cmd: scsi command struct
4803 *
4804 * Return value:
4805 * SUCCESS / FAILED
4806 **/
203fa3fe 4807static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
4808{
4809 struct ipr_ioa_cfg *ioa_cfg;
4810 int rc;
4811
4812 ENTER;
4813 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4814
a92fa25c
KSS
4815 if (!ioa_cfg->in_reset_reload) {
4816 dev_err(&ioa_cfg->pdev->dev,
4817 "Adapter being reset as a result of error recovery.\n");
1da177e4 4818
a92fa25c
KSS
4819 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4820 ioa_cfg->sdt_state = GET_DUMP;
4821 }
1da177e4
LT
4822
4823 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4824
4825 LEAVE;
4826 return rc;
4827}
4828
203fa3fe 4829static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
df0ae249
JG
4830{
4831 int rc;
4832
4833 spin_lock_irq(cmd->device->host->host_lock);
4834 rc = __ipr_eh_host_reset(cmd);
4835 spin_unlock_irq(cmd->device->host->host_lock);
4836
4837 return rc;
4838}
4839
c6513096
BK
4840/**
4841 * ipr_device_reset - Reset the device
4842 * @ioa_cfg: ioa config struct
4843 * @res: resource entry struct
4844 *
4845 * This function issues a device reset to the affected device.
4846 * If the device is a SCSI device, a LUN reset will be sent
4847 * to the device first. If that does not work, a target reset
35a39691
BK
4848 * will be sent. If the device is a SATA device, a PHY reset will
4849 * be sent.
c6513096
BK
4850 *
4851 * Return value:
4852 * 0 on success / non-zero on failure
4853 **/
4854static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4855 struct ipr_resource_entry *res)
4856{
4857 struct ipr_cmnd *ipr_cmd;
4858 struct ipr_ioarcb *ioarcb;
4859 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4860 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
4861 u32 ioasc;
4862
4863 ENTER;
4864 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4865 ioarcb = &ipr_cmd->ioarcb;
4866 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
4867
4868 if (ipr_cmd->ioa_cfg->sis64) {
4869 regs = &ipr_cmd->i.ata_ioadl.regs;
4870 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4871 } else
4872 regs = &ioarcb->u.add_data.u.regs;
c6513096 4873
3e7ebdfa 4874 ioarcb->res_handle = res->res_handle;
c6513096
BK
4875 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4876 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
4877 if (ipr_is_gata(res)) {
4878 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4879 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
4880 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4881 }
c6513096
BK
4882
4883 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 4884 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 4885 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
96d21f00
WB
4886 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4887 if (ipr_cmd->ioa_cfg->sis64)
4888 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4889 sizeof(struct ipr_ioasa_gata));
4890 else
4891 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4892 sizeof(struct ipr_ioasa_gata));
4893 }
c6513096
BK
4894
4895 LEAVE;
203fa3fe 4896 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
c6513096
BK
4897}
4898
35a39691
BK
4899/**
4900 * ipr_sata_reset - Reset the SATA port
cc0680a5 4901 * @link: SATA link to reset
35a39691
BK
4902 * @classes: class of the attached device
4903 *
cc0680a5 4904 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
4905 *
4906 * Return value:
4907 * 0 on success / non-zero on failure
4908 **/
cc0680a5 4909static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 4910 unsigned long deadline)
35a39691 4911{
cc0680a5 4912 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
4913 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4914 struct ipr_resource_entry *res;
4915 unsigned long lock_flags = 0;
4916 int rc = -ENXIO;
4917
4918 ENTER;
4919 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 4920 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
4921 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4922 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4924 }
4925
35a39691
BK
4926 res = sata_port->res;
4927 if (res) {
4928 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 4929 *classes = res->ata_class;
35a39691
BK
4930 }
4931
4932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4933 LEAVE;
4934 return rc;
4935}
4936
1da177e4
LT
4937/**
4938 * __ipr_eh_dev_reset - Reset the device
4939 * @scsi_cmd: scsi command struct
4940 *
4941 * This function issues a device reset to the affected device.
4942 * A LUN reset will be sent to the device first. If that does
4943 * not work, a target reset will be sent.
4944 *
4945 * Return value:
4946 * SUCCESS / FAILED
4947 **/
203fa3fe 4948static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
4949{
4950 struct ipr_cmnd *ipr_cmd;
4951 struct ipr_ioa_cfg *ioa_cfg;
4952 struct ipr_resource_entry *res;
35a39691
BK
4953 struct ata_port *ap;
4954 int rc = 0;
05a6538a 4955 struct ipr_hrr_queue *hrrq;
1da177e4
LT
4956
4957 ENTER;
4958 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4959 res = scsi_cmd->device->hostdata;
4960
eeb88307 4961 if (!res)
1da177e4
LT
4962 return FAILED;
4963
4964 /*
4965 * If we are currently going through reset/reload, return failed. This will force the
4966 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4967 * reset to complete
4968 */
4969 if (ioa_cfg->in_reset_reload)
4970 return FAILED;
56d6aa33 4971 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
4972 return FAILED;
4973
05a6538a 4974 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 4975 spin_lock(&hrrq->_lock);
05a6538a 4976 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4977 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4978 if (ipr_cmd->scsi_cmd)
4979 ipr_cmd->done = ipr_scsi_eh_done;
4980 if (ipr_cmd->qc)
4981 ipr_cmd->done = ipr_sata_eh_done;
4982 if (ipr_cmd->qc &&
4983 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4984 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4985 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4986 }
7402ecef 4987 }
1da177e4 4988 }
56d6aa33 4989 spin_unlock(&hrrq->_lock);
1da177e4 4990 }
1da177e4 4991 res->resetting_device = 1;
fb3ed3cb 4992 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
4993
4994 if (ipr_is_gata(res) && res->sata_port) {
4995 ap = res->sata_port->ap;
4996 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 4997 ata_std_error_handler(ap);
35a39691 4998 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26 4999
05a6538a 5000 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5001 spin_lock(&hrrq->_lock);
05a6538a 5002 list_for_each_entry(ipr_cmd,
5003 &hrrq->hrrq_pending_q, queue) {
5004 if (ipr_cmd->ioarcb.res_handle ==
5005 res->res_handle) {
5006 rc = -EIO;
5007 break;
5008 }
5af23d26 5009 }
56d6aa33 5010 spin_unlock(&hrrq->_lock);
5af23d26 5011 }
35a39691
BK
5012 } else
5013 rc = ipr_device_reset(ioa_cfg, res);
1da177e4
LT
5014 res->resetting_device = 0;
5015
1da177e4 5016 LEAVE;
203fa3fe 5017 return rc ? FAILED : SUCCESS;
1da177e4
LT
5018}
5019
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset.
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

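/*
 * Illustrative note (hedged sketch, not text from the original driver): the
 * abort command and the bus-reset command above are deliberately cross-linked
 * through ->sibling, which is how ipr_bus_reset_done() later decides whether
 * the eh thread is still sleeping on the abort:
 *
 *	abort_cmd->sibling = reset_cmd;   reset_cmd->sibling = abort_cmd;
 *	// If the abort completes first, its done path clears
 *	// reset_cmd->sibling, so ipr_bus_reset_done() sees
 *	// ipr_cmd->sibling->sibling == NULL and wakes the abort instead:
 *	if (ipr_cmd->sibling->sibling)		// abort still outstanding
 *		ipr_cmd->sibling->sibling = NULL;
 *	else					// abort already finished
 *		ipr_cmd->sibling->done(ipr_cmd->sibling);
 */
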
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels the specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->scsi_cmd == scsi_cmd) {
				ipr_cmd->done = ipr_scsi_eh_done;
				op_found = 1;
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	number to log with the message
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				"Invalid response handle from IOA: ",
				cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}

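/*
 * Illustrative note (hedged sketch, not text from the original driver): the
 * host response queue is a circular buffer of 32-bit entries that the adapter
 * fills. Since the host never writes entries back, a toggle bit separates new
 * entries from stale ones: the adapter flips the bit it writes on every pass
 * around the ring, and the host flips its expected value whenever it wraps.
 * One entry decodes roughly as follows (names are from the code above):
 *
 *	u32 entry = be32_to_cpu(*hrrq_curr);
 *	bool fresh = (entry & IPR_HRRQ_TOGGLE_BIT) == hrrq->toggle_bit;
 *	u16 handle = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK)
 *			>> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 *	// "handle" indexes ioa_cfg->ipr_cmnd_list[]; anything outside
 *	// [min_cmd_id, max_cmd_id] for this queue means corruption,
 *	// which is why the loop bails out through ipr_isr_eh().
 */
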
static int ipr_iopoll(struct blk_iopoll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		blk_iopoll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}

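/*
 * Illustrative note (not text from the original driver): this follows the
 * classic NAPI-style polling contract. blk_iopoll hands the driver a budget;
 * the driver consumes up to that many completions and calls
 * blk_iopoll_complete() only when it ran out of work (completed_ops < budget),
 * re-enabling interrupt-driven operation. Returning completed_ops == budget
 * leaves the poller scheduled, so the next softirq pass keeps draining the
 * queue without taking another hardware interrupt.
 */
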
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to hrr queue struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				"Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}

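/*
 * Illustrative note (not text from the original driver): both ISRs in this
 * area first move finished commands onto a private "doneq" while holding
 * hrrq->lock, then run the ->fast_done() callbacks only after dropping it.
 * That keeps the per-queue lock hold time short and avoids lock recursion,
 * since the done callbacks can re-enter the driver (ipr_scsi_done, for
 * example, takes hrrq->lock itself).
 */
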
/**
 * ipr_isr_mhrrq - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to hrr queue struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	    ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit) {
			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
				blk_iopoll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}

/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

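/*
 * Illustrative note (hedged sketch, not text from the original driver): this
 * builder and the one below differ only in descriptor format. On SIS64
 * hardware each IOADL entry carries a 64-bit DMA address in its own field,
 * while the legacy 32-bit format packs the R/W flags and the segment length
 * into a single big-endian word:
 *
 *	// 64-bit descriptor			// 32-bit descriptor
 *	ioadl64[i].flags    = flags;		ioadl[i].flags_and_data_len =
 *	ioadl64[i].data_len = len;		    cpu_to_be32(flags | len);
 *	ioadl64[i].address  = addr64;		ioadl[i].address = addr32;
 *
 * Both mark the final element with IPR_IOADL_FLAGS_LAST so the adapter knows
 * where the chain ends.
 */
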
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}

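/*
 * Illustrative note (not text from the original driver): scsi_populate_tag_msg()
 * fills tag[] with a two-byte SPI tag message, where tag[0] is the message code
 * and tag[1] the tag value. Only the code matters here; the mapping is simply
 *
 *	MSG_SIMPLE_TAG  -> IPR_FLAGS_LO_SIMPLE_TASK    (unordered queueing)
 *	MSG_HEAD_TAG    -> IPR_FLAGS_LO_HEAD_OF_Q_TASK (run ahead of queued ops)
 *	MSG_ORDERED_TAG -> IPR_FLAGS_LO_ORDERED_TASK   (preserve ordering)
 *
 * with IPR_FLAGS_LO_UNTAGGED_TASK as the fallback when the command carries no
 * tag message at all.
 */
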
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}

/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}

/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}

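/*
 * Illustrative note (not text from the original driver): the two branches
 * above emit the two standard SPC sense formats. 0x72 is descriptor-format
 * sense, used here because a vset failing LBA can exceed 32 bits: the
 * information sense data descriptor (type 0x00, additional length 0x0A,
 * VALID bit 0x80) carries the LBA in bytes 12-19. 0x70 is fixed-format
 * sense, where the LBA must fit in the four INFORMATION bytes 3-6 and is
 * gated by the Valid bit ORed into byte 0.
 */
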
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}

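/*
 * Illustrative note (hedged, not text from the original driver): this driver
 * has three ways to come up with sense data for a failed command. Autosense
 * (above) is sense the adapter captured together with the failure;
 * ipr_gen_sense() synthesizes sense from the IOASC for non-GSCSI (e.g. vset)
 * resources; and failing both, ipr_erp_cancel_all()/ipr_erp_request_sense()
 * make a real REQUEST SENSE round trip to the device. ipr_erp_start() below
 * picks between them, and skips the round trip on NACA-model devices.
 */
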
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long hrrq_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		scsi_cmd->scsi_done(scsi_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
	} else {
		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
	}
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		if (ipr_is_gscsi(res))
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}

/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};

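/*
 * Illustrative note (not text from the original driver): this template is how
 * the SCSI mid-layer drives everything above. queuecommand feeds the fast
 * path, and on a timeout the mid-layer escalates through the eh_* hooks in
 * order:
 *
 *	eh_abort_handler	-> ipr_eh_abort      (cancel one op)
 *	eh_device_reset_handler	-> ipr_eh_dev_reset  (reset the LUN/device)
 *	eh_host_reset_handler	-> ipr_eh_host_reset (reset the whole IOA)
 *
 * which is why the earlier handlers return FAILED while a reset/reload is in
 * flight: failing fast pushes the mid-layer on to the next, stronger step.
 */
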
/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:	ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}

/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 *	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->qc == qc) {
				ipr_device_reset(ioa_cfg, sata_port->res);
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}

/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 *	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	spin_lock(&ipr_cmd->hrrq->_lock);
	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	spin_unlock(&ipr_cmd->hrrq->_lock);
	ata_qc_complete(qc);
}

/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:	ATA queued command
 *
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:	ATA queued command
 *
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc:	queued command
 *
 * Return value:
 *	0 if success / ATA_DEFER_LINK to retry the command later
 **/
static int ipr_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	qc->lldd_task = NULL;
	spin_lock(&hrrq->_lock);
	if (unlikely(hrrq->ioa_is_dead)) {
		spin_unlock(&hrrq->_lock);
		return 0;
	}

	if (unlikely(!hrrq->allow_cmds)) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	qc->lldd_task = ipr_cmd;
	spin_unlock(&hrrq->_lock);
	return 0;
}

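/*
 * Illustrative note (not text from the original driver): ->qc_defer and
 * ->qc_issue cooperate through qc->lldd_task. The defer hook runs earlier in
 * libata's submission path and stashes a reserved ipr_cmnd there;
 * ipr_qc_issue() below then claims it (and retries the reservation itself if
 * libata skipped the defer step). Returning ATA_DEFER_LINK instead of
 * sleeping keeps the reservation non-blocking: libata simply requeues the
 * command on the link and tries again later.
 */
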
/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 *	0 if success / AC_ERR_* on failure
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (qc->lldd_task == NULL)
		ipr_qc_defer(qc);

	ipr_cmd = qc->lldd_task;
	if (ipr_cmd == NULL)
		return AC_ERR_SYSTEM;

	qc->lldd_task = NULL;
	spin_lock(&ipr_cmd->hrrq->_lock);
	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
			ipr_cmd->hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_SYSTEM;
	}

	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);
	spin_unlock(&ipr_cmd->hrrq->_lock);

	return 0;
}

/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc:	ATA queued command
 *
 * Return value:
 *	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;
	tf->ctl = g->alt_status;

	return true;
}

static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};

1da177e4
LT
6725#ifdef CONFIG_PPC_PSERIES
6726static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
6727 PVR_NORTHSTAR,
6728 PVR_PULSAR,
6729 PVR_POWER4,
6730 PVR_ICESTAR,
6731 PVR_SSTAR,
6732 PVR_POWER4p,
6733 PVR_630,
6734 PVR_630p
1da177e4
LT
6735};
6736
6737/**
6738 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6739 * @ioa_cfg: ioa cfg struct
6740 *
6741 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6742 * certain pSeries hardware. This function determines if the given
6743 * adapter is in one of these configurations or not.
6744 *
6745 * Return value:
6746 * 1 if adapter is not supported / 0 if adapter is supported
6747 **/
6748static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6749{
1da177e4
LT
6750 int i;
6751
44c10138 6752 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6753 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6754 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6755 return 1;
1da177e4
LT
6756 }
6757 }
6758 return 0;
6759}
6760#else
6761#define ipr_invalid_adapter(ioa_cfg) 0
6762#endif
6763
6764/**
6765 * ipr_ioa_bringdown_done - IOA bring down completion.
6766 * @ipr_cmd: ipr command struct
6767 *
6768 * This function processes the completion of an adapter bring down.
6769 * It wakes any reset sleepers.
6770 *
6771 * Return value:
6772 * IPR_RC_JOB_RETURN
6773 **/
6774static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6775{
6776 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6777
6778 ENTER;
6779 ioa_cfg->in_reset_reload = 0;
6780 ioa_cfg->reset_retries = 0;
05a6538a 6781 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6782 wake_up_all(&ioa_cfg->reset_wait_q);
6783
6784 spin_unlock_irq(ioa_cfg->host->host_lock);
6785 scsi_unblock_requests(ioa_cfg->host);
6786 spin_lock_irq(ioa_cfg->host->host_lock);
6787 LEAVE;
6788
6789 return IPR_RC_JOB_RETURN;
6790}
6791
6792/**
6793 * ipr_ioa_reset_done - IOA reset completion.
6794 * @ipr_cmd: ipr command struct
6795 *
6796 * This function processes the completion of an adapter reset.
6797 * It schedules any necessary mid-layer add/removes and
6798 * wakes any reset sleepers.
6799 *
6800 * Return value:
6801 * IPR_RC_JOB_RETURN
6802 **/
6803static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6804{
6805 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6806 struct ipr_resource_entry *res;
6807 struct ipr_hostrcb *hostrcb, *temp;
56d6aa33 6808 int i = 0, j;
1da177e4
LT
6809
6810 ENTER;
6811 ioa_cfg->in_reset_reload = 0;
56d6aa33 6812 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6813 spin_lock(&ioa_cfg->hrrq[j]._lock);
6814 ioa_cfg->hrrq[j].allow_cmds = 1;
6815 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6816 }
6817 wmb();
1da177e4 6818 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6819 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6820
6821 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6822 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6823 ipr_trace;
6824 break;
6825 }
6826 }
6827 schedule_work(&ioa_cfg->work_q);
6828
6829 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6830 list_del(&hostrcb->queue);
6831 if (i++ < IPR_NUM_LOG_HCAMS)
6832 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6833 else
6834 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6835 }
6836
6bb04170 6837 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6838 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6839
6840 ioa_cfg->reset_retries = 0;
05a6538a 6841 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6842 wake_up_all(&ioa_cfg->reset_wait_q);
6843
30237853 6844 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6845 scsi_unblock_requests(ioa_cfg->host);
30237853 6846 spin_lock(ioa_cfg->host->host_lock);
1da177e4 6847
56d6aa33 6848 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
1da177e4 6849 scsi_block_requests(ioa_cfg->host);
6850
6851 LEAVE;
6852 return IPR_RC_JOB_RETURN;
6853}
6854
6855/**
6856 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6857 * @supported_dev: supported device struct
6858 * @vpids: vendor product id struct
6859 *
6860 * Return value:
6861 * none
6862 **/
6863static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6864 struct ipr_std_inq_vpids *vpids)
6865{
6866 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6867 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6868 supported_dev->num_records = 1;
6869 supported_dev->data_length =
6870 cpu_to_be16(sizeof(struct ipr_supported_device));
6871 supported_dev->reserved = 0;
6872}
6873
6874/**
6875 * ipr_set_supported_devs - Send Set Supported Devices for a device
6876 * @ipr_cmd: ipr command struct
6877 *
a32c055f 6878 * This function sends a Set Supported Devices command to the adapter.
1da177e4
LT
6879 *
6880 * Return value:
6881 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6882 **/
6883static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6884{
6885 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6886 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6887 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6888 struct ipr_resource_entry *res = ipr_cmd->u.res;
6889
6890 ipr_cmd->job_step = ipr_ioa_reset_done;
6891
6892 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6893 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6894 continue;
6895
6896 ipr_cmd->u.res = res;
3e7ebdfa 6897 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6898
6899 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6900 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6901 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6902
6903 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6904 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6905 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6906 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6907
a32c055f
WB
6908 ipr_init_ioadl(ipr_cmd,
6909 ioa_cfg->vpd_cbs_dma +
6910 offsetof(struct ipr_misc_cbs, supp_dev),
6911 sizeof(struct ipr_supported_device),
6912 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6913
6914 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6915 IPR_SET_SUP_DEVICE_TIMEOUT);
6916
3e7ebdfa
WB
6917 if (!ioa_cfg->sis64)
6918 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 6919 LEAVE;
1da177e4
LT
6920 return IPR_RC_JOB_RETURN;
6921 }
6922
05a6538a 6923 LEAVE;
1da177e4
LT
6924 return IPR_RC_JOB_CONTINUE;
6925}
6926
6927/**
6928 * ipr_get_mode_page - Locate specified mode page
6929 * @mode_pages: mode page buffer
6930 * @page_code: page code to find
6931 * @len: minimum required length for mode page
6932 *
6933 * Return value:
6934 * pointer to mode page / NULL on failure
6935 **/
6936static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6937 u32 page_code, u32 len)
6938{
6939 struct ipr_mode_page_hdr *mode_hdr;
6940 u32 page_length;
6941 u32 length;
6942
6943 if (!mode_pages || (mode_pages->hdr.length == 0))
6944 return NULL;
6945
6946 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6947 mode_hdr = (struct ipr_mode_page_hdr *)
6948 (mode_pages->data + mode_pages->hdr.block_desc_len);
6949
6950 while (length) {
6951 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6952 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6953 return mode_hdr;
6954 break;
6955 } else {
6956 page_length = (sizeof(struct ipr_mode_page_hdr) +
6957 mode_hdr->page_length);
6958 length -= page_length;
6959 mode_hdr = (struct ipr_mode_page_hdr *)
6960 ((unsigned long)mode_hdr + page_length);
6961 }
6962 }
6963 return NULL;
6964}
6965
6966/**
6967 * ipr_check_term_power - Check for term power errors
6968 * @ioa_cfg: ioa config struct
6969 * @mode_pages: IOAFP mode pages buffer
6970 *
6971 * Check the IOAFP's mode page 28 for term power errors
6972 *
6973 * Return value:
6974 * nothing
6975 **/
6976static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6977 struct ipr_mode_pages *mode_pages)
6978{
6979 int i;
6980 int entry_length;
6981 struct ipr_dev_bus_entry *bus;
6982 struct ipr_mode_page28 *mode_page;
6983
6984 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6985 sizeof(struct ipr_mode_page28));
6986
6987 entry_length = mode_page->entry_length;
6988
6989 bus = mode_page->bus;
6990
6991 for (i = 0; i < mode_page->num_entries; i++) {
6992 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6993 dev_err(&ioa_cfg->pdev->dev,
6994 "Term power is absent on scsi bus %d\n",
6995 bus->res_addr.bus);
6996 }
6997
6998 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6999 }
7000}
7001
7002/**
7003 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7004 * @ioa_cfg: ioa config struct
7005 *
7006 * Looks through the config table checking for SES devices. If
7007 * an SES device is listed in the SES table with a maximum SCSI
7008 * bus speed, the speed of that bus is limited accordingly.
7009 *
7010 * Return value:
7011 * none
7012 **/
7013static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7014{
7015 u32 max_xfer_rate;
7016 int i;
7017
7018 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7019 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7020 ioa_cfg->bus_attr[i].bus_width);
7021
7022 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7023 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7024 }
7025}
7026
7027/**
7028 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7029 * @ioa_cfg: ioa config struct
7030 * @mode_pages: mode page 28 buffer
7031 *
7032 * Updates mode page 28 based on driver configuration
7033 *
7034 * Return value:
7035 * none
7036 **/
7037static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7038 struct ipr_mode_pages *mode_pages)
1da177e4
LT
7039{
7040 int i, entry_length;
7041 struct ipr_dev_bus_entry *bus;
7042 struct ipr_bus_attributes *bus_attr;
7043 struct ipr_mode_page28 *mode_page;
7044
7045 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7046 sizeof(struct ipr_mode_page28));
7047
7048 entry_length = mode_page->entry_length;
7049
7050 /* Loop for each device bus entry */
7051 for (i = 0, bus = mode_page->bus;
7052 i < mode_page->num_entries;
7053 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7054 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7055 dev_err(&ioa_cfg->pdev->dev,
7056 "Invalid resource address reported: 0x%08X\n",
7057 IPR_GET_PHYS_LOC(bus->res_addr));
7058 continue;
7059 }
7060
7061 bus_attr = &ioa_cfg->bus_attr[i];
7062 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7063 bus->bus_width = bus_attr->bus_width;
7064 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7065 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7066 if (bus_attr->qas_enabled)
7067 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7068 else
7069 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7070 }
7071}
7072
7073/**
7074 * ipr_build_mode_select - Build a mode select command
7075 * @ipr_cmd: ipr command struct
7076 * @res_handle: resource handle to send command to
7077 * @parm: byte 1 of the Mode Select CDB
7078 * @dma_addr: DMA buffer address
7079 * @xfer_len: data transfer length
7080 *
7081 * Return value:
7082 * none
7083 **/
7084 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f 7085 __be32 res_handle, u8 parm,
7086 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7087{
1da177e4
LT
7088 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7089
7090 ioarcb->res_handle = res_handle;
7091 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7092 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7093 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7094 ioarcb->cmd_pkt.cdb[1] = parm;
7095 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7096
a32c055f 7097 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7098}
7099
7100/**
7101 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7102 * @ipr_cmd: ipr command struct
7103 *
7104 * This function sets up the SCSI bus attributes and sends
7105 * a Mode Select for Page 28 to activate them.
7106 *
7107 * Return value:
7108 * IPR_RC_JOB_RETURN
7109 **/
7110static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7111{
7112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7113 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7114 int length;
7115
7116 ENTER;
4733804c
BK
7117 ipr_scsi_bus_speed_limit(ioa_cfg);
7118 ipr_check_term_power(ioa_cfg, mode_pages);
7119 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7120 length = mode_pages->hdr.length + 1;
7121 mode_pages->hdr.length = 0;
1da177e4
LT
7122
7123 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7124 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7125 length);
7126
f72919ec
WB
7127 ipr_cmd->job_step = ipr_set_supported_devs;
7128 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7129 struct ipr_resource_entry, queue);
1da177e4
LT
7130 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7131
7132 LEAVE;
7133 return IPR_RC_JOB_RETURN;
7134}
7135
7136/**
7137 * ipr_build_mode_sense - Builds a mode sense command
7138 * @ipr_cmd: ipr command struct
7139 * @res_handle: resource handle to send command to
7140 * @parm: Byte 2 of mode sense command
7141 * @dma_addr: DMA address of mode sense buffer
7142 * @xfer_len: Size of DMA buffer
7143 *
7144 * Return value:
7145 * none
7146 **/
7147static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7148 __be32 res_handle,
a32c055f 7149 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7150{
1da177e4
LT
7151 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7152
7153 ioarcb->res_handle = res_handle;
7154 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7155 ioarcb->cmd_pkt.cdb[2] = parm;
7156 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7157 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7158
a32c055f 7159 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7160}
7161
dfed823e
BK
7162/**
7163 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7164 * @ipr_cmd: ipr command struct
7165 *
7166 * This function handles the failure of an IOA bringup command.
7167 *
7168 * Return value:
7169 * IPR_RC_JOB_RETURN
7170 **/
7171static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7172{
7173 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7174 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7175
7176 dev_err(&ioa_cfg->pdev->dev,
7177 "0x%02X failed with IOASC: 0x%08X\n",
7178 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7179
7180 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7181 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e
BK
7182 return IPR_RC_JOB_RETURN;
7183}
7184
7185/**
7186 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7187 * @ipr_cmd: ipr command struct
7188 *
7189 * This function handles the failure of a Mode Sense to the IOAFP.
7190 * Some adapters do not handle all mode pages.
7191 *
7192 * Return value:
7193 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7194 **/
7195static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7196{
f72919ec 7197 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7198 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7199
7200 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7201 ipr_cmd->job_step = ipr_set_supported_devs;
7202 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7203 struct ipr_resource_entry, queue);
dfed823e
BK
7204 return IPR_RC_JOB_CONTINUE;
7205 }
7206
7207 return ipr_reset_cmd_failed(ipr_cmd);
7208}
7209
1da177e4
LT
7210/**
7211 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7212 * @ipr_cmd: ipr command struct
7213 *
7214 * This function sends a Page 28 mode sense to the IOA to
7215 * retrieve SCSI bus attributes.
7216 *
7217 * Return value:
7218 * IPR_RC_JOB_RETURN
7219 **/
7220static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7221{
7222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7223
7224 ENTER;
7225 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7226 0x28, ioa_cfg->vpd_cbs_dma +
7227 offsetof(struct ipr_misc_cbs, mode_pages),
7228 sizeof(struct ipr_mode_pages));
7229
7230 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7231 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7232
7233 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7234
7235 LEAVE;
7236 return IPR_RC_JOB_RETURN;
7237}
7238
ac09c349
BK
7239/**
7240 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7241 * @ipr_cmd: ipr command struct
7242 *
7243 * This function enables dual IOA RAID support if possible.
7244 *
7245 * Return value:
7246 * IPR_RC_JOB_RETURN
7247 **/
7248static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7249{
7250 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7251 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7252 struct ipr_mode_page24 *mode_page;
7253 int length;
7254
7255 ENTER;
7256 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7257 sizeof(struct ipr_mode_page24));
7258
7259 if (mode_page)
7260 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7261
7262 length = mode_pages->hdr.length + 1;
7263 mode_pages->hdr.length = 0;
7264
7265 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7266 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7267 length);
7268
7269 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7270 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7271
7272 LEAVE;
7273 return IPR_RC_JOB_RETURN;
7274}
7275
7276/**
7277 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7278 * @ipr_cmd: ipr command struct
7279 *
7280 * This function handles the failure of a Mode Sense to the IOAFP.
7281 * Some adapters do not handle all mode pages.
7282 *
7283 * Return value:
7284 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7285 **/
7286static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7287{
96d21f00 7288 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7289
7290 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7291 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7292 return IPR_RC_JOB_CONTINUE;
7293 }
7294
7295 return ipr_reset_cmd_failed(ipr_cmd);
7296}
7297
7298/**
7299 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7300 * @ipr_cmd: ipr command struct
7301 *
7302 * This function sends a mode sense to the IOA to retrieve
7303 * the IOA Advanced Function Control mode page.
7304 *
7305 * Return value:
7306 * IPR_RC_JOB_RETURN
7307 **/
7308static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7309{
7310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7311
7312 ENTER;
7313 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7314 0x24, ioa_cfg->vpd_cbs_dma +
7315 offsetof(struct ipr_misc_cbs, mode_pages),
7316 sizeof(struct ipr_mode_pages));
7317
7318 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7319 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7320
7321 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7322
7323 LEAVE;
7324 return IPR_RC_JOB_RETURN;
7325}
7326
1da177e4
LT
7327/**
7328 * ipr_init_res_table - Initialize the resource table
7329 * @ipr_cmd: ipr command struct
7330 *
7331 * This function looks through the existing resource table, comparing
7332 * it with the config table. This function will take care of old/new
7333 * devices and schedule adding/removing them from the mid-layer
7334 * as appropriate.
7335 *
7336 * Return value:
7337 * IPR_RC_JOB_CONTINUE
7338 **/
7339static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7340{
7341 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7342 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7343 struct ipr_config_table_entry_wrapper cfgtew;
7344 int entries, found, flag, i;
1da177e4
LT
7345 LIST_HEAD(old_res);
7346
7347 ENTER;
3e7ebdfa
WB
7348 if (ioa_cfg->sis64)
7349 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7350 else
7351 flag = ioa_cfg->u.cfg_table->hdr.flags;
7352
7353 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4 7354 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7355
7356 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7357 list_move_tail(&res->queue, &old_res);
7358
3e7ebdfa 7359 if (ioa_cfg->sis64)
438b0331 7360 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7361 else
7362 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7363
7364 for (i = 0; i < entries; i++) {
7365 if (ioa_cfg->sis64)
7366 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7367 else
7368 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7369 found = 0;
7370
7371 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7372 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7373 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7374 found = 1;
7375 break;
7376 }
7377 }
7378
7379 if (!found) {
7380 if (list_empty(&ioa_cfg->free_res_q)) {
7381 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7382 break;
7383 }
7384
7385 found = 1;
7386 res = list_entry(ioa_cfg->free_res_q.next,
7387 struct ipr_resource_entry, queue);
7388 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7389 ipr_init_res_entry(res, &cfgtew);
1da177e4 7390 res->add_to_ml = 1;
56115598
WB
7391 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7392 res->sdev->allow_restart = 1;
1da177e4
LT
7393
7394 if (found)
3e7ebdfa 7395 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7396 }
7397
7398 list_for_each_entry_safe(res, temp, &old_res, queue) {
7399 if (res->sdev) {
7400 res->del_from_ml = 1;
3e7ebdfa 7401 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7402 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7403 }
7404 }
7405
3e7ebdfa
WB
7406 list_for_each_entry_safe(res, temp, &old_res, queue) {
7407 ipr_clear_res_target(res);
7408 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7409 }
7410
ac09c349
BK
7411 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7412 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7413 else
7414 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7415
7416 LEAVE;
7417 return IPR_RC_JOB_CONTINUE;
7418}
7419
7420/**
7421 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7422 * @ipr_cmd: ipr command struct
7423 *
7424 * This function sends a Query IOA Configuration command
7425 * to the adapter to retrieve the IOA configuration table.
7426 *
7427 * Return value:
7428 * IPR_RC_JOB_RETURN
7429 **/
7430static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7431{
7432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7433 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7434 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7435 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7436
7437 ENTER;
ac09c349
BK
7438 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7439 ioa_cfg->dual_raid = 1;
1da177e4
LT
7440 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7441 ucode_vpd->major_release, ucode_vpd->card_type,
7442 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7443 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7444 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7445
7446 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7447 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7448 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7449 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7450
3e7ebdfa 7451 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7452 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7453
7454 ipr_cmd->job_step = ipr_init_res_table;
7455
7456 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7457
7458 LEAVE;
7459 return IPR_RC_JOB_RETURN;
7460}
7461
7462/**
7463 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7465 * @ipr_cmd: ipr command struct
 * @flags: CDB byte 1 flags
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: transfer length
7465 *
7466 * This utility function sends an inquiry to the adapter.
7467 *
7468 * Return value:
7469 * none
7470 **/
7471static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7472 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
7473{
7474 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
7475
7476 ENTER;
7477 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7478 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7479
7480 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7481 ioarcb->cmd_pkt.cdb[1] = flags;
7482 ioarcb->cmd_pkt.cdb[2] = page;
7483 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7484
a32c055f 7485 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7486
7487 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7488 LEAVE;
7489}
7490
62275040
BK
7491/**
7492 * ipr_inquiry_page_supported - Is the given inquiry page supported
7493 * @page0: inquiry page 0 buffer
7494 * @page: page code.
7495 *
7496 * This function determines if the specified inquiry page is supported.
7497 *
7498 * Return value:
7499 * 1 if page is supported / 0 if not
7500 **/
7501static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7502{
7503 int i;
7504
7505 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7506 if (page0->page[i] == page)
7507 return 1;
7508
7509 return 0;
7510}
7511
ac09c349
BK
7512/**
7513 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7514 * @ipr_cmd: ipr command struct
7515 *
7516 * This function sends a Page 0xD0 inquiry to the adapter
7517 * to retrieve adapter capabilities.
7518 *
7519 * Return value:
7520 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7521 **/
7522static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7523{
7524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7525 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7526 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7527
7528 ENTER;
7529 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7530 memset(cap, 0, sizeof(*cap));
7531
7532 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7533 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7534 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7535 sizeof(struct ipr_inquiry_cap));
7536 return IPR_RC_JOB_RETURN;
7537 }
7538
7539 LEAVE;
7540 return IPR_RC_JOB_CONTINUE;
7541}
7542
1da177e4
LT
7543/**
7544 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7545 * @ipr_cmd: ipr command struct
7546 *
7547 * This function sends a Page 3 inquiry to the adapter
7548 * to retrieve software VPD information.
7549 *
7550 * Return value:
7551 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7552 **/
7553static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
7554{
7555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
7556
7557 ENTER;
7558
ac09c349 7559 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
7560
7561 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7562 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7563 sizeof(struct ipr_inquiry_page3));
7564
7565 LEAVE;
7566 return IPR_RC_JOB_RETURN;
7567}
7568
7569/**
7570 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7571 * @ipr_cmd: ipr command struct
7572 *
7573 * This function sends a Page 0 inquiry to the adapter
7574 * to retrieve supported inquiry pages.
7575 *
7576 * Return value:
7577 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7578 **/
7579static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7580{
7581 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7582 char type[5];
7583
7584 ENTER;
7585
7586 /* Grab the type out of the VPD and store it away */
7587 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7588 type[4] = '\0';
7589 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7590
62275040 7591 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7592
62275040
BK
7593 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7594 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7595 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
7596
7597 LEAVE;
7598 return IPR_RC_JOB_RETURN;
7599}
7600
7601/**
7602 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7603 * @ipr_cmd: ipr command struct
7604 *
7605 * This function sends a standard inquiry to the adapter.
7606 *
7607 * Return value:
7608 * IPR_RC_JOB_RETURN
7609 **/
7610static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7611{
7612 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7613
7614 ENTER;
62275040 7615 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
7616
7617 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7618 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7619 sizeof(struct ipr_ioa_vpd));
7620
7621 LEAVE;
7622 return IPR_RC_JOB_RETURN;
7623}
7624
7625/**
214777ba 7626 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
7627 * @ipr_cmd: ipr command struct
7628 *
7629 * This function sends an Identify Host Request Response Queue
7630 * command to establish the HRRQ with the adapter.
7631 *
7632 * Return value:
7633 * IPR_RC_JOB_RETURN
7634 **/
214777ba 7635static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7636{
7637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7638 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7639 struct ipr_hrr_queue *hrrq;
1da177e4
LT
7640
7641 ENTER;
05a6538a 7642 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
1da177e4
LT
7643 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7644
56d6aa33 7645 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7646 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 7647
05a6538a 7648 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7649 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7650
05a6538a 7651 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7652 if (ioa_cfg->sis64)
7653 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7654
05a6538a 7655 if (ioa_cfg->nvectors == 1)
7656 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7657 else
7658 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7659
7660 ioarcb->cmd_pkt.cdb[2] =
7661 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7662 ioarcb->cmd_pkt.cdb[3] =
7663 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7664 ioarcb->cmd_pkt.cdb[4] =
7665 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7666 ioarcb->cmd_pkt.cdb[5] =
7667 ((u64) hrrq->host_rrq_dma) & 0xff;
7668 ioarcb->cmd_pkt.cdb[7] =
7669 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7670 ioarcb->cmd_pkt.cdb[8] =
7671 (sizeof(u32) * hrrq->size) & 0xff;
7672
7673 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7674 ioarcb->cmd_pkt.cdb[9] =
7675 ioa_cfg->identify_hrrq_index;
1da177e4 7676
05a6538a 7677 if (ioa_cfg->sis64) {
7678 ioarcb->cmd_pkt.cdb[10] =
7679 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7680 ioarcb->cmd_pkt.cdb[11] =
7681 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7682 ioarcb->cmd_pkt.cdb[12] =
7683 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7684 ioarcb->cmd_pkt.cdb[13] =
7685 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7686 }
7687
7688 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7689 ioarcb->cmd_pkt.cdb[14] =
7690 ioa_cfg->identify_hrrq_index;
05a6538a 7691
7692 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7693 IPR_INTERNAL_TIMEOUT);
7694
56d6aa33 7695 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7696 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 7697
7698 LEAVE;
7699 return IPR_RC_JOB_RETURN;
05a6538a 7700 }
7701
1da177e4 7702 LEAVE;
05a6538a 7703 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
7704}
7705
7706/**
7707 * ipr_reset_timer_done - Adapter reset timer function
7708 * @ipr_cmd: ipr command struct
7709 *
7710 * Description: This function is used in adapter reset processing
7711 * for timing events. If the reset_cmd pointer in the IOA
7712 * config struct is not this adapter's, we are doing nested
7713 * resets and fail_all_ops will take care of freeing the
7714 * command block.
7715 *
7716 * Return value:
7717 * none
7718 **/
7719static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7720{
7721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7722 unsigned long lock_flags = 0;
7723
7724 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7725
7726 if (ioa_cfg->reset_cmd == ipr_cmd) {
7727 list_del(&ipr_cmd->queue);
7728 ipr_cmd->done(ipr_cmd);
7729 }
7730
7731 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7732}
7733
7734/**
7735 * ipr_reset_start_timer - Start a timer for adapter reset job
7736 * @ipr_cmd: ipr command struct
7737 * @timeout: timeout value
7738 *
7739 * Description: This function is used in adapter reset processing
7740 * for timing events. If the reset_cmd pointer in the IOA
7741 * config struct is not this adapter's, we are doing nested
7742 * resets and fail_all_ops will take care of freeing the
7743 * command block.
7744 *
7745 * Return value:
7746 * none
7747 **/
7748static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7749 unsigned long timeout)
7750{
05a6538a 7751
7752 ENTER;
7753 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7754 ipr_cmd->done = ipr_reset_ioa_job;
7755
7756 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7757 ipr_cmd->timer.expires = jiffies + timeout;
7758 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7759 add_timer(&ipr_cmd->timer);
7760}
7761
7762/**
7763 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7764 * @ioa_cfg: ioa cfg struct
7765 *
7766 * Return value:
7767 * nothing
7768 **/
7769static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7770{
05a6538a 7771 struct ipr_hrr_queue *hrrq;
1da177e4 7772
05a6538a 7773 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 7774 spin_lock(&hrrq->_lock);
05a6538a 7775 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7776
7777 /* Initialize Host RRQ pointers */
7778 hrrq->hrrq_start = hrrq->host_rrq;
7779 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7780 hrrq->hrrq_curr = hrrq->hrrq_start;
7781 hrrq->toggle_bit = 1;
56d6aa33 7782 spin_unlock(&hrrq->_lock);
05a6538a 7783 }
56d6aa33 7784 wmb();
05a6538a 7785
56d6aa33 7786 ioa_cfg->identify_hrrq_index = 0;
7787 if (ioa_cfg->hrrq_num == 1)
7788 atomic_set(&ioa_cfg->hrrq_index, 0);
7789 else
7790 atomic_set(&ioa_cfg->hrrq_index, 1);
1da177e4
LT
7791
7792 /* Zero out config table */
3e7ebdfa 7793 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7794}
7795
214777ba
WB
7796/**
7797 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7798 * @ipr_cmd: ipr command struct
7799 *
7800 * Return value:
7801 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7802 **/
7803static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7804{
7805 unsigned long stage, stage_time;
7806 u32 feedback;
7807 volatile u32 int_reg;
7808 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7809 u64 maskval = 0;
7810
7811 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7812 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7813 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7814
7815 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7816
7817 /* sanity check the stage_time value */
438b0331
WB
7818 if (stage_time == 0)
7819 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7820 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba 7821 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7822 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7823 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7824
7825 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7826 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7827 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7828 stage_time = ioa_cfg->transop_timeout;
7829 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7830 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
1df79ca4
WB
7831 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7832 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7833 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7834 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7835 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7836 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7837 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7838 return IPR_RC_JOB_CONTINUE;
7839 }
214777ba
WB
7840 }
7841
7842 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7843 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7844 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7845 ipr_cmd->done = ipr_reset_ioa_job;
7846 add_timer(&ipr_cmd->timer);
05a6538a 7847
7848 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
214777ba
WB
7849
7850 return IPR_RC_JOB_RETURN;
7851}
7852
1da177e4
LT
7853/**
7854 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7855 * @ipr_cmd: ipr command struct
7856 *
7857 * This function reinitializes some control blocks and
7858 * enables destructive diagnostics on the adapter.
7859 *
7860 * Return value:
7861 * IPR_RC_JOB_RETURN
7862 **/
7863static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7864{
7865 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7866 volatile u32 int_reg;
7be96900 7867 volatile u64 maskval;
56d6aa33 7868 int i;
1da177e4
LT
7869
7870 ENTER;
214777ba 7871 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7872 ipr_init_ioa_mem(ioa_cfg);
7873
56d6aa33 7874 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7875 spin_lock(&ioa_cfg->hrrq[i]._lock);
7876 ioa_cfg->hrrq[i].allow_interrupts = 1;
7877 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7878 }
7879 wmb();
8701f185
WB
7880 if (ioa_cfg->sis64) {
7881 /* Set the adapter to the correct endian mode. */
7882 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7883 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7884 }
7885
7be96900 7886 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
7887
7888 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7889 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 7890 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
7891 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7892 return IPR_RC_JOB_CONTINUE;
7893 }
7894
7895 /* Enable destructive diagnostics on IOA */
214777ba
WB
7896 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7897
7be96900
WB
7898 if (ioa_cfg->sis64) {
7899 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7900 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7901 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7902 } else
7903 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 7904
1da177e4
LT
7905 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7906
7907 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7908
214777ba
WB
7909 if (ioa_cfg->sis64) {
7910 ipr_cmd->job_step = ipr_reset_next_stage;
7911 return IPR_RC_JOB_CONTINUE;
7912 }
7913
1da177e4 7914 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 7915 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
7916 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7917 ipr_cmd->done = ipr_reset_ioa_job;
7918 add_timer(&ipr_cmd->timer);
05a6538a 7919 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7920
7921 LEAVE;
7922 return IPR_RC_JOB_RETURN;
7923}
7924
7925/**
7926 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7927 * @ipr_cmd: ipr command struct
7928 *
7929 * This function is invoked when an adapter dump has run out
7930 * of processing time.
7931 *
7932 * Return value:
7933 * IPR_RC_JOB_CONTINUE
7934 **/
7935static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7936{
7937 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7938
7939 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696 7940 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7941 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4 7942 ioa_cfg->sdt_state = ABORT_DUMP;
7943
4c647e90 7944 ioa_cfg->dump_timeout = 1;
1da177e4
LT
7945 ipr_cmd->job_step = ipr_reset_alert;
7946
7947 return IPR_RC_JOB_CONTINUE;
7948}
7949
7950/**
7951 * ipr_unit_check_no_data - Log a unit check/no data error log
7952 * @ioa_cfg: ioa config struct
7953 *
7954 * Logs an error indicating the adapter unit checked, but for some
7955 * reason, we were unable to fetch the unit check buffer.
7956 *
7957 * Return value:
7958 * nothing
7959 **/
7960static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7961{
7962 ioa_cfg->errors_logged++;
7963 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7964}
7965
7966/**
7967 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7968 * @ioa_cfg: ioa config struct
7969 *
7970 * Fetches the unit check buffer from the adapter by clocking the data
7971 * through the mailbox register.
7972 *
7973 * Return value:
7974 * nothing
7975 **/
7976static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7977{
7978 unsigned long mailbox;
7979 struct ipr_hostrcb *hostrcb;
7980 struct ipr_uc_sdt sdt;
7981 int rc, length;
65f56475 7982 u32 ioasc;
1da177e4
LT
7983
7984 mailbox = readl(ioa_cfg->ioa_mailbox);
7985
dcbad00e 7986 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
7987 ipr_unit_check_no_data(ioa_cfg);
7988 return;
7989 }
7990
7991 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7992 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7993 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7994
dcbad00e
WB
7995 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7996 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7997 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
7998 ipr_unit_check_no_data(ioa_cfg);
7999 return;
8000 }
8001
8002 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
8003 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8004 length = be32_to_cpu(sdt.entry[0].end_token);
8005 else
8006 length = (be32_to_cpu(sdt.entry[0].end_token) -
8007 be32_to_cpu(sdt.entry[0].start_token)) &
8008 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
8009
8010 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8011 struct ipr_hostrcb, queue);
8012 list_del(&hostrcb->queue);
8013 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8014
8015 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8016 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
8017 (__be32 *)&hostrcb->hcam,
8018 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8019
65f56475 8020 if (!rc) {
1da177e4 8021 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8022 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
8023 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8024 ioa_cfg->sdt_state == GET_DUMP)
8025 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8026 } else
1da177e4 8027 ipr_unit_check_no_data(ioa_cfg);
8028
8029 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8030}
8031
110def85
WB
8032/**
8033 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8034 * @ipr_cmd: ipr command struct
8035 *
8036 * Description: This function retrieves the unit check buffer from the adapter.
8037 *
8038 * Return value:
8039 * IPR_RC_JOB_RETURN
8040 **/
8041static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8042{
8043 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8044
8045 ENTER;
8046 ioa_cfg->ioa_unit_checked = 0;
8047 ipr_get_unit_check_buffer(ioa_cfg);
8048 ipr_cmd->job_step = ipr_reset_alert;
8049 ipr_reset_start_timer(ipr_cmd, 0);
8050
8051 LEAVE;
8052 return IPR_RC_JOB_RETURN;
8053}
8054
1da177e4
LT
8055/**
8056 * ipr_reset_restore_cfg_space - Restore PCI config space.
8057 * @ipr_cmd: ipr command struct
8058 *
8059 * Description: This function restores the saved PCI config space of
8060 * the adapter, fails all outstanding ops back to the callers, and
8061 * fetches the dump/unit check if applicable to this reset.
8062 *
8063 * Return value:
8064 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8065 **/
8066static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8067{
8068 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 8069 u32 int_reg;
1da177e4
LT
8070
8071 ENTER;
99c965dd 8072 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8073 pci_restore_state(ioa_cfg->pdev);
1da177e4
LT
8074
8075 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8076 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
8077 return IPR_RC_JOB_CONTINUE;
8078 }
8079
8080 ipr_fail_all_ops(ioa_cfg);
8081
8701f185
WB
8082 if (ioa_cfg->sis64) {
8083 /* Set the adapter to the correct endian mode. */
8084 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8085 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8086 }
8087
1da177e4 8088 if (ioa_cfg->ioa_unit_checked) {
110def85
WB
8089 if (ioa_cfg->sis64) {
8090 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8091 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8092 return IPR_RC_JOB_RETURN;
8093 } else {
8094 ioa_cfg->ioa_unit_checked = 0;
8095 ipr_get_unit_check_buffer(ioa_cfg);
8096 ipr_cmd->job_step = ipr_reset_alert;
8097 ipr_reset_start_timer(ipr_cmd, 0);
8098 return IPR_RC_JOB_RETURN;
8099 }
1da177e4
LT
8100 }
8101
8102 if (ioa_cfg->in_ioa_bringdown) {
8103 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8104 } else {
8105 ipr_cmd->job_step = ipr_reset_enable_ioa;
8106
8107 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 8108 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 8109 ioa_cfg->dump_timeout = 0;
4d4dd706
KSS
8110 if (ioa_cfg->sis64)
8111 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8112 else
8113 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
1da177e4
LT
8114 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8115 schedule_work(&ioa_cfg->work_q);
8116 return IPR_RC_JOB_RETURN;
8117 }
8118 }
8119
438b0331 8120 LEAVE;
1da177e4
LT
8121 return IPR_RC_JOB_CONTINUE;
8122}
8123
e619e1a7
BK
8124/**
8125 * ipr_reset_bist_done - BIST has completed on the adapter.
8126 * @ipr_cmd: ipr command struct
8127 *
8128 * Description: Unblock config space and resume the reset process.
8129 *
8130 * Return value:
8131 * IPR_RC_JOB_CONTINUE
8132 **/
8133static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8134{
fb51ccbf
JK
8135 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8136
e619e1a7 8137 ENTER;
fb51ccbf
JK
8138 if (ioa_cfg->cfg_locked)
8139 pci_cfg_access_unlock(ioa_cfg->pdev);
8140 ioa_cfg->cfg_locked = 0;
e619e1a7
BK
8141 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8142 LEAVE;
8143 return IPR_RC_JOB_CONTINUE;
8144}
8145
1da177e4
LT
8146/**
8147 * ipr_reset_start_bist - Run BIST on the adapter.
8148 * @ipr_cmd: ipr command struct
8149 *
8150 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8151 *
8152 * Return value:
8153 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8154 **/
8155static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8156{
8157 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8158 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
8159
8160 ENTER;
cb237ef7
WB
8161 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8162 writel(IPR_UPROCI_SIS64_START_BIST,
8163 ioa_cfg->regs.set_uproc_interrupt_reg32);
8164 else
8165 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8166
8167 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8168 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
8169 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8170 rc = IPR_RC_JOB_RETURN;
cb237ef7 8171 } else {
fb51ccbf
JK
8172 if (ioa_cfg->cfg_locked)
8173 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8174 ioa_cfg->cfg_locked = 0;
cb237ef7
WB
8175 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8176 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
8177 }
8178
8179 LEAVE;
8180 return rc;
8181}
8182
463fc696
BK
8183/**
8184 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8185 * @ipr_cmd: ipr command struct
8186 *
8187 * Description: This clears PCI reset to the adapter and delays two seconds.
8188 *
8189 * Return value:
8190 * IPR_RC_JOB_RETURN
8191 **/
8192static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8193{
8194 ENTER;
8195 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8196 ipr_cmd->job_step = ipr_reset_bist_done;
8197 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8198 LEAVE;
8199 return IPR_RC_JOB_RETURN;
8200}
8201
8202/**
8203 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8204 * @ipr_cmd: ipr command struct
8205 *
8206 * Description: This asserts PCI reset to the adapter.
8207 *
8208 * Return value:
8209 * IPR_RC_JOB_RETURN
8210 **/
8211static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8212{
8213 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8214 struct pci_dev *pdev = ioa_cfg->pdev;
8215
8216 ENTER;
463fc696
BK
8217 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8218 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8219 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8220 LEAVE;
8221 return IPR_RC_JOB_RETURN;
8222}
8223
fb51ccbf
JK
8224/**
8225 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8226 * @ipr_cmd: ipr command struct
8227 *
8228 * Description: This attempts to block config access to the IOA.
8229 *
8230 * Return value:
8231 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8232 **/
8233static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8234{
8235 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8236 int rc = IPR_RC_JOB_CONTINUE;
8237
8238 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8239 ioa_cfg->cfg_locked = 1;
8240 ipr_cmd->job_step = ioa_cfg->reset;
8241 } else {
8242 if (ipr_cmd->u.time_left) {
8243 rc = IPR_RC_JOB_RETURN;
8244 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8245 ipr_reset_start_timer(ipr_cmd,
8246 IPR_CHECK_FOR_RESET_TIMEOUT);
8247 } else {
8248 ipr_cmd->job_step = ioa_cfg->reset;
8249 dev_err(&ioa_cfg->pdev->dev,
8250 "Timed out waiting to lock config access. Resetting anyway.\n");
8251 }
8252 }
8253
8254 return rc;
8255}
8256
8257/**
8258 * ipr_reset_block_config_access - Block config access to the IOA
8259 * @ipr_cmd: ipr command struct
8260 *
8261 * Description: This attempts to block config access to the IOA
8262 *
8263 * Return value:
8264 * IPR_RC_JOB_CONTINUE
8265 **/
8266static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8267{
8268 ipr_cmd->ioa_cfg->cfg_locked = 0;
8269 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8270 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8271 return IPR_RC_JOB_CONTINUE;
8272}
8273
1da177e4
LT
8274/**
8275 * ipr_reset_allowed - Query whether or not IOA can be reset
8276 * @ioa_cfg: ioa config struct
8277 *
8278 * Return value:
8279 * 0 if reset not allowed / non-zero if reset is allowed
8280 **/
8281static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8282{
8283 volatile u32 temp_reg;
8284
8285 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8286 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8287}
8288
8289/**
8290 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8291 * @ipr_cmd: ipr command struct
8292 *
8293 * Description: This function waits for adapter permission to run BIST,
8294 * then runs BIST. If the adapter does not give permission after a
8295 * reasonable time, we will reset the adapter anyway. The impact of
8296 * resetting the adapter without warning the adapter is the risk of
8297 * losing the persistent error log on the adapter. If the adapter is
8298 * reset while it is writing to the flash on the adapter, the flash
8299 * segment will have bad ECC and be zeroed.
8300 *
8301 * Return value:
8302 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8303 **/
8304static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8305{
8306 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8307 int rc = IPR_RC_JOB_RETURN;
8308
8309 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8310 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8311 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8312 } else {
fb51ccbf 8313 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8314 rc = IPR_RC_JOB_CONTINUE;
8315 }
8316
8317 return rc;
8318}
8319
8320/**
8701f185 8321 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8322 * @ipr_cmd: ipr command struct
8323 *
8324 * Description: This function alerts the adapter that it will be reset.
8325 * If memory space is not currently enabled, proceed directly
8326 * to running BIST on the adapter. The timer must always be started
8327 * so we guarantee we do not run BIST from ipr_isr.
8328 *
8329 * Return value:
8330 * IPR_RC_JOB_RETURN
8331 **/
8332static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8333{
8334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8335 u16 cmd_reg;
8336 int rc;
8337
8338 ENTER;
8339 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8340
8341 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8342 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8343 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8344 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8345 } else {
fb51ccbf 8346 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8347 }
8348
8349 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8350 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8351
8352 LEAVE;
8353 return IPR_RC_JOB_RETURN;
8354}
8355
8356/**
8357 * ipr_reset_ucode_download_done - Microcode download completion
8358 * @ipr_cmd: ipr command struct
8359 *
8360 * Description: This function unmaps the microcode download buffer.
8361 *
8362 * Return value:
8363 * IPR_RC_JOB_CONTINUE
8364 **/
8365static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8366{
8367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8368 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8369
8370 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8371 sglist->num_sg, DMA_TO_DEVICE);
8372
8373 ipr_cmd->job_step = ipr_reset_alert;
8374 return IPR_RC_JOB_CONTINUE;
8375}
8376
8377/**
8378 * ipr_reset_ucode_download - Download microcode to the adapter
8379 * @ipr_cmd: ipr command struct
8380 *
8381 * Description: This function checks to see if it there is microcode
8382 * to download to the adapter. If there is, a download is performed.
8383 *
8384 * Return value:
8385 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8386 **/
8387static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8388{
8389 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8390 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8391
8392 ENTER;
8393 ipr_cmd->job_step = ipr_reset_alert;
8394
8395 if (!sglist)
8396 return IPR_RC_JOB_CONTINUE;
8397
8398 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8399 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8400 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8401 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8402 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8403 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8404 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8405
a32c055f
WB
8406 if (ioa_cfg->sis64)
8407 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8408 else
8409 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
8410 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8411
8412 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8413 IPR_WRITE_BUFFER_TIMEOUT);
8414
8415 LEAVE;
8416 return IPR_RC_JOB_RETURN;
8417}
8418
8419/**
8420 * ipr_reset_shutdown_ioa - Shutdown the adapter
8421 * @ipr_cmd: ipr command struct
8422 *
8423 * Description: This function issues an adapter shutdown of the
8424 * specified type to the specified adapter as part of the
8425 * adapter reset job.
8426 *
8427 * Return value:
8428 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8429 **/
8430static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8431{
8432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8433 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8434 unsigned long timeout;
8435 int rc = IPR_RC_JOB_CONTINUE;
8436
8437 ENTER;
56d6aa33 8438 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8439 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8440 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8441 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8442 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8443 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8444
ac09c349
BK
8445 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8446 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4 8447 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8448 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
8449 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8450 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8451 else
ac09c349 8452 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
8453
8454 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8455
8456 rc = IPR_RC_JOB_RETURN;
8457 ipr_cmd->job_step = ipr_reset_ucode_download;
8458 } else
8459 ipr_cmd->job_step = ipr_reset_alert;
8460
8461 LEAVE;
8462 return rc;
8463}
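/*
 * Timeout selection in ipr_reset_shutdown_ioa() above, in order of
 * precedence:
 *
 *	IPR_SHUTDOWN_NORMAL             -> IPR_SHUTDOWN_TIMEOUT
 *	IPR_SHUTDOWN_PREPARE_FOR_NORMAL -> IPR_INTERNAL_TIMEOUT
 *	dual IOA RAID enabled           -> IPR_DUAL_IOA_ABBR_SHUTDOWN_TO
 *	any other abbreviated shutdown  -> IPR_ABBREV_SHUTDOWN_TIMEOUT
 */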
8464
8465/**
8466 * ipr_reset_ioa_job - Adapter reset job
8467 * @ipr_cmd: ipr command struct
8468 *
8469 * Description: This function is the job router for the adapter reset job.
8470 *
8471 * Return value:
8472 * none
8473 **/
8474static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8475{
8476 u32 rc, ioasc;
1da177e4
LT
8477 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8478
8479 do {
96d21f00 8480 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
8481
8482 if (ioa_cfg->reset_cmd != ipr_cmd) {
8483 /*
8484 * We are doing nested adapter resets and this is
8485 * not the current reset job.
8486 */
05a6538a 8487 list_add_tail(&ipr_cmd->queue,
8488 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
8489 return;
8490 }
8491
8492 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
8493 rc = ipr_cmd->job_step_failed(ipr_cmd);
8494 if (rc == IPR_RC_JOB_RETURN)
8495 return;
1da177e4
LT
8496 }
8497
8498 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8499 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8500 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8501 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
8502}
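/*
 * ipr_reset_ioa_job() above drives a simple state machine: each
 * job_step either finishes synchronously and returns
 * IPR_RC_JOB_CONTINUE, so the next step runs immediately, or kicks
 * off an asynchronous operation and returns IPR_RC_JOB_RETURN, in
 * which case the command's done path re-enters this router later.
 * A stripped-down sketch of the pattern (hypothetical names, not
 * driver API):
 *
 *	enum job_rc { JOB_CONTINUE, JOB_RETURN };
 *
 *	struct job {
 *		enum job_rc (*step)(struct job *);	// set by each step
 *	};
 *
 *	static void run_job(struct job *j)
 *	{
 *		while (j->step(j) == JOB_CONTINUE)
 *			;	// each step advances j->step before returning
 *	}
 */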
8503
8504/**
8505 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8506 * @ioa_cfg: ioa config struct
8507 * @job_step: first job step of reset job
8508 * @shutdown_type: shutdown type
8509 *
8510 * Description: This function will initiate the reset of the given adapter
8511 * starting at the selected job step.
8512 * If the caller needs to wait on the completion of the reset,
8513 * the caller must sleep on the reset_wait_q.
8514 *
8515 * Return value:
8516 * none
8517 **/
8518static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8519 int (*job_step) (struct ipr_cmnd *),
8520 enum ipr_shutdown_type shutdown_type)
8521{
8522 struct ipr_cmnd *ipr_cmd;
56d6aa33 8523 int i;
1da177e4
LT
8524
8525 ioa_cfg->in_reset_reload = 1;
56d6aa33 8526 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8527 spin_lock(&ioa_cfg->hrrq[i]._lock);
8528 ioa_cfg->hrrq[i].allow_cmds = 0;
8529 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8530 }
8531 wmb();
1da177e4
LT
8532 scsi_block_requests(ioa_cfg->host);
8533
8534 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8535 ioa_cfg->reset_cmd = ipr_cmd;
8536 ipr_cmd->job_step = job_step;
8537 ipr_cmd->u.shutdown_type = shutdown_type;
8538
8539 ipr_reset_ioa_job(ipr_cmd);
8540}
8541
8542/**
8543 * ipr_initiate_ioa_reset - Initiate an adapter reset
8544 * @ioa_cfg: ioa config struct
8545 * @shutdown_type: shutdown type
8546 *
8547 * Description: This function will initiate the reset of the given adapter.
8548 * If the caller needs to wait on the completion of the reset,
8549 * the caller must sleep on the reset_wait_q.
8550 *
8551 * Return value:
8552 * none
8553 **/
8554static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8555 enum ipr_shutdown_type shutdown_type)
8556{
56d6aa33 8557 int i;
8558
8559 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
8560 return;
8561
41e9a696
BK
8562 if (ioa_cfg->in_reset_reload) {
8563 if (ioa_cfg->sdt_state == GET_DUMP)
8564 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8565 else if (ioa_cfg->sdt_state == READ_DUMP)
8566 ioa_cfg->sdt_state = ABORT_DUMP;
8567 }
1da177e4
LT
8568
8569 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8570 dev_err(&ioa_cfg->pdev->dev,
8571 "IOA taken offline - error recovery failed\n");
8572
8573 ioa_cfg->reset_retries = 0;
56d6aa33 8574 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8575 spin_lock(&ioa_cfg->hrrq[i]._lock);
8576 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8577 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8578 }
8579 wmb();
1da177e4
LT
8580
8581 if (ioa_cfg->in_ioa_bringdown) {
8582 ioa_cfg->reset_cmd = NULL;
8583 ioa_cfg->in_reset_reload = 0;
8584 ipr_fail_all_ops(ioa_cfg);
8585 wake_up_all(&ioa_cfg->reset_wait_q);
8586
8587 spin_unlock_irq(ioa_cfg->host->host_lock);
8588 scsi_unblock_requests(ioa_cfg->host);
8589 spin_lock_irq(ioa_cfg->host->host_lock);
8590 return;
8591 } else {
8592 ioa_cfg->in_ioa_bringdown = 1;
8593 shutdown_type = IPR_SHUTDOWN_NONE;
8594 }
8595 }
8596
8597 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8598 shutdown_type);
8599}
8600
f8a88b19
LV
8601/**
8602 * ipr_reset_freeze - Hold off all I/O activity
8603 * @ipr_cmd: ipr command struct
8604 *
8605 * Description: If the PCI slot is frozen, hold off all I/O
8606 * activity; then, as soon as the slot is available again,
8607 * initiate an adapter reset.
8608 */
8609static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8610{
56d6aa33 8611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8612 int i;
8613
f8a88b19 8614 /* Disallow new interrupts, avoid loop */
56d6aa33 8615 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8616 spin_lock(&ioa_cfg->hrrq[i]._lock);
8617 ioa_cfg->hrrq[i].allow_interrupts = 0;
8618 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8619 }
8620 wmb();
05a6538a 8621 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
f8a88b19
LV
8622 ipr_cmd->done = ipr_reset_ioa_job;
8623 return IPR_RC_JOB_RETURN;
8624}
8625
8626/**
8627 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8628 * @pdev: PCI device struct
8629 *
8630 * Description: This routine is called to tell us that the PCI bus
8631 * is down. Can't do anything here, except put the device driver
8632 * into a holding pattern, waiting for the PCI bus to come back.
8633 */
8634static void ipr_pci_frozen(struct pci_dev *pdev)
8635{
8636 unsigned long flags = 0;
8637 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8638
8639 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8640 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8642}
8643
8644/**
8645 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8646 * @pdev: PCI device struct
8647 *
8648 * Description: This routine is called by the pci error recovery
8649 * code after the PCI slot has been reset, just before we
8650 * should resume normal operations.
8651 */
8652static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8653{
8654 unsigned long flags = 0;
8655 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8656
8657 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
463fc696
BK
8658 if (ioa_cfg->needs_warm_reset)
8659 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8660 else
8661 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8662 IPR_SHUTDOWN_NONE);
f8a88b19
LV
8663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8664 return PCI_ERS_RESULT_RECOVERED;
8665}
8666
8667/**
8668 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8669 * @pdev: PCI device struct
8670 *
8671 * Description: This routine is called when the PCI bus has
8672 * permanently failed.
8673 */
8674static void ipr_pci_perm_failure(struct pci_dev *pdev)
8675{
8676 unsigned long flags = 0;
8677 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 8678 int i;
f8a88b19
LV
8679
8680 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8681 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8682 ioa_cfg->sdt_state = ABORT_DUMP;
8683 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8684 ioa_cfg->in_ioa_bringdown = 1;
56d6aa33 8685 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8686 spin_lock(&ioa_cfg->hrrq[i]._lock);
8687 ioa_cfg->hrrq[i].allow_cmds = 0;
8688 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8689 }
8690 wmb();
f8a88b19
LV
8691 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8692 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8693}
8694
8695/**
8696 * ipr_pci_error_detected - Called when a PCI error is detected.
8697 * @pdev: PCI device struct
8698 * @state: PCI channel state
8699 *
8700 * Description: Called when a PCI error is detected.
8701 *
8702 * Return value:
8703 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8704 */
8705static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8706 pci_channel_state_t state)
8707{
8708 switch (state) {
8709 case pci_channel_io_frozen:
8710 ipr_pci_frozen(pdev);
8711 return PCI_ERS_RESULT_NEED_RESET;
8712 case pci_channel_io_perm_failure:
8713 ipr_pci_perm_failure(pdev);
8714 return PCI_ERS_RESULT_DISCONNECT;
8716 default:
8717 break;
8718 }
8719 return PCI_ERS_RESULT_NEED_RESET;
8720}
8721
1da177e4
LT
8722/**
8723 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8724 * @ioa_cfg: ioa cfg struct
8725 *
8726 * Description: This is the second phase of adapter initialization.
8727 * This function takes care of initializing the adapter to the point
8728 * where it can accept new commands.
8729 *
8730 * Return value:
b1c11812 8731 * 0 on success / -EIO on failure
1da177e4 8732 **/
6f039790 8733static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
8734{
8735 int rc = 0;
8736 unsigned long host_lock_flags = 0;
8737
8738 ENTER;
8739 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8740 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce
BK
8741 if (ioa_cfg->needs_hard_reset) {
8742 ioa_cfg->needs_hard_reset = 0;
8743 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8744 } else
8745 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8746 IPR_SHUTDOWN_NONE);
1da177e4
LT
8747 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8748 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8749 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8750
56d6aa33 8751 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8752 rc = -EIO;
8753 } else if (ipr_invalid_adapter(ioa_cfg)) {
8754 if (!ipr_testmode)
8755 rc = -EIO;
8756
8757 dev_err(&ioa_cfg->pdev->dev,
8758 "Adapter not supported in this hardware configuration.\n");
8759 }
8760
8761 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8762
8763 LEAVE;
8764 return rc;
8765}
8766
8767/**
8768 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8769 * @ioa_cfg: ioa config struct
8770 *
8771 * Return value:
8772 * none
8773 **/
8774static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8775{
8776 int i;
8777
8778 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8779 if (ioa_cfg->ipr_cmnd_list[i])
8780 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8781 ioa_cfg->ipr_cmnd_list[i],
8782 ioa_cfg->ipr_cmnd_list_dma[i]);
8783
8784 ioa_cfg->ipr_cmnd_list[i] = NULL;
8785 }
8786
8787 if (ioa_cfg->ipr_cmd_pool)
203fa3fe 8788 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 8789
89aad428
BK
8790 kfree(ioa_cfg->ipr_cmnd_list);
8791 kfree(ioa_cfg->ipr_cmnd_list_dma);
8792 ioa_cfg->ipr_cmnd_list = NULL;
8793 ioa_cfg->ipr_cmnd_list_dma = NULL;
1da177e4
LT
8794 ioa_cfg->ipr_cmd_pool = NULL;
8795}
8796
8797/**
8798 * ipr_free_mem - Frees memory allocated for an adapter
8799 * @ioa_cfg: ioa cfg struct
8800 *
8801 * Return value:
8802 * nothing
8803 **/
8804static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8805{
8806 int i;
8807
8808 kfree(ioa_cfg->res_entries);
8809 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8810 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8811 ipr_free_cmd_blks(ioa_cfg);
05a6538a 8812
8813 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8814 pci_free_consistent(ioa_cfg->pdev,
8815 sizeof(u32) * ioa_cfg->hrrq[i].size,
8816 ioa_cfg->hrrq[i].host_rrq,
8817 ioa_cfg->hrrq[i].host_rrq_dma);
8818
3e7ebdfa
WB
8819 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8820 ioa_cfg->u.cfg_table,
1da177e4
LT
8821 ioa_cfg->cfg_table_dma);
8822
8823 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8824 pci_free_consistent(ioa_cfg->pdev,
8825 sizeof(struct ipr_hostrcb),
8826 ioa_cfg->hostrcb[i],
8827 ioa_cfg->hostrcb_dma[i]);
8828 }
8829
8830 ipr_free_dump(ioa_cfg);
1da177e4
LT
8831 kfree(ioa_cfg->trace);
8832}
8833
8834/**
8835 * ipr_free_all_resources - Free all allocated resources for an adapter.
8836 * @ioa_cfg: ioa config struct
8837 *
8838 * This function frees all allocated resources for the
8839 * specified adapter.
8840 *
8841 * Return value:
8842 * none
8843 **/
8844static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8845{
8846 struct pci_dev *pdev = ioa_cfg->pdev;
8847
8848 ENTER;
05a6538a 8849 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8850 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8851 int i;
8852 for (i = 0; i < ioa_cfg->nvectors; i++)
8853 free_irq(ioa_cfg->vectors_info[i].vec,
8854 &ioa_cfg->hrrq[i]);
8855 } else
8856 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8857
56d6aa33 8858 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
05a6538a 8859 pci_disable_msi(pdev);
56d6aa33 8860 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8861 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
05a6538a 8862 pci_disable_msix(pdev);
56d6aa33 8863 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8864 }
05a6538a 8865
1da177e4
LT
8866 iounmap(ioa_cfg->hdw_dma_regs);
8867 pci_release_regions(pdev);
8868 ipr_free_mem(ioa_cfg);
8869 scsi_host_put(ioa_cfg->host);
8870 pci_disable_device(pdev);
8871 LEAVE;
8872}
8873
8874/**
8875 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8876 * @ioa_cfg: ioa config struct
8877 *
8878 * Return value:
8879 * 0 on success / -ENOMEM on allocation failure
8880 **/
6f039790 8881static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
8882{
8883 struct ipr_cmnd *ipr_cmd;
8884 struct ipr_ioarcb *ioarcb;
8885 dma_addr_t dma_addr;
05a6538a 8886 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 8887
203fa3fe
KSS
8888 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8889 sizeof(struct ipr_cmnd), 512, 0);
1da177e4
LT
8890
8891 if (!ioa_cfg->ipr_cmd_pool)
8892 return -ENOMEM;
8893
89aad428
BK
8894 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8895 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8896
8897 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8898 ipr_free_cmd_blks(ioa_cfg);
8899 return -ENOMEM;
8900 }
8901
05a6538a 8902 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8903 if (ioa_cfg->hrrq_num > 1) {
8904 if (i == 0) {
8905 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8906 ioa_cfg->hrrq[i].min_cmd_id = 0;
8907 ioa_cfg->hrrq[i].max_cmd_id =
8908 (entries_each_hrrq - 1);
8909 } else {
8910 entries_each_hrrq =
8911 IPR_NUM_BASE_CMD_BLKS/
8912 (ioa_cfg->hrrq_num - 1);
8913 ioa_cfg->hrrq[i].min_cmd_id =
8914 IPR_NUM_INTERNAL_CMD_BLKS +
8915 (i - 1) * entries_each_hrrq;
8916 ioa_cfg->hrrq[i].max_cmd_id =
8917 (IPR_NUM_INTERNAL_CMD_BLKS +
8918 i * entries_each_hrrq - 1);
8919 }
8920 } else {
8921 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8922 ioa_cfg->hrrq[i].min_cmd_id = 0;
8923 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8924 }
8925 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8926 }
8927
8928 BUG_ON(ioa_cfg->hrrq_num == 0);
8929
8930 i = IPR_NUM_CMD_BLKS -
8931 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8932 if (i > 0) {
8933 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8934 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8935 }
8936
1da177e4 8937 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
203fa3fe 8938 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
8939
8940 if (!ipr_cmd) {
8941 ipr_free_cmd_blks(ioa_cfg);
8942 return -ENOMEM;
8943 }
8944
8945 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8946 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8947 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8948
8949 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
8950 ipr_cmd->dma_addr = dma_addr;
8951 if (ioa_cfg->sis64)
8952 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8953 else
8954 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8955
1da177e4 8956 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
8957 if (ioa_cfg->sis64) {
8958 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8959 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8960 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 8961 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
8962 } else {
8963 ioarcb->write_ioadl_addr =
8964 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8965 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8966 ioarcb->ioasa_host_pci_addr =
96d21f00 8967 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 8968 }
1da177e4
LT
8969 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8970 ipr_cmd->cmd_index = i;
8971 ipr_cmd->ioa_cfg = ioa_cfg;
8972 ipr_cmd->sense_buffer_dma = dma_addr +
8973 offsetof(struct ipr_cmnd, sense_buffer);
8974
05a6538a 8975 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
8976 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8977 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8978 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
8979 hrrq_id++;
1da177e4
LT
8980 }
8981
8982 return 0;
8983}
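/*
 * Worked example of the command-block split above, assuming purely
 * illustrative values IPR_NUM_INTERNAL_CMD_BLKS = 5,
 * IPR_NUM_BASE_CMD_BLKS = 100, IPR_NUM_CMD_BLKS = 105 and
 * hrrq_num = 4:
 *
 *	hrrq[0]: cmd ids  0 -   4	(5 internal blocks)
 *	hrrq[1]: cmd ids  5 -  37	(100 / 3 = 33 blocks)
 *	hrrq[2]: cmd ids 38 -  70	(33 blocks)
 *	hrrq[3]: cmd ids 71 - 103	(33 blocks)
 *
 * The fix-up after the loop hands the integer-division remainder
 * (105 - 103 - 1 = 1 block here) to the last hrrq, extending it to
 * cmd id 104 and size 34.
 */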
8984
8985/**
8986 * ipr_alloc_mem - Allocate memory for an adapter
8987 * @ioa_cfg: ioa config struct
8988 *
8989 * Return value:
8990 * 0 on success / non-zero for error
8991 **/
6f039790 8992static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
8993{
8994 struct pci_dev *pdev = ioa_cfg->pdev;
8995 int i, rc = -ENOMEM;
8996
8997 ENTER;
0bc42e35 8998 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 8999 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
9000
9001 if (!ioa_cfg->res_entries)
9002 goto out;
9003
3e7ebdfa
WB
9004 if (ioa_cfg->sis64) {
9005 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
9006 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
9007 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
9008 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
9009 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
9010 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
a2e49cb2
BK
9011
9012 if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
9013 || !ioa_cfg->vset_ids)
9014 goto out_free_res_entries;
3e7ebdfa
WB
9015 }
9016
9017 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9018 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9019 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9020 }
1da177e4
LT
9021
9022 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9023 sizeof(struct ipr_misc_cbs),
9024 &ioa_cfg->vpd_cbs_dma);
9025
9026 if (!ioa_cfg->vpd_cbs)
9027 goto out_free_res_entries;
9028
05a6538a 9029 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9030 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9031 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
56d6aa33 9032 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9033 if (i == 0)
9034 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9035 else
9036 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
05a6538a 9037 }
9038
1da177e4
LT
9039 if (ipr_alloc_cmd_blks(ioa_cfg))
9040 goto out_free_vpd_cbs;
9041
05a6538a 9042 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9043 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9044 sizeof(u32) * ioa_cfg->hrrq[i].size,
9045 &ioa_cfg->hrrq[i].host_rrq_dma);
9046
9047 if (!ioa_cfg->hrrq[i].host_rrq) {
9048 while (--i >= 0)
9049 pci_free_consistent(pdev,
9050 sizeof(u32) * ioa_cfg->hrrq[i].size,
9051 ioa_cfg->hrrq[i].host_rrq,
9052 ioa_cfg->hrrq[i].host_rrq_dma);
9053 goto out_ipr_free_cmd_blocks;
9054 }
9055 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9056 }
1da177e4 9057
3e7ebdfa
WB
9058 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9059 ioa_cfg->cfg_table_size,
9060 &ioa_cfg->cfg_table_dma);
1da177e4 9061
3e7ebdfa 9062 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9063 goto out_free_host_rrq;
9064
9065 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9066 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9067 sizeof(struct ipr_hostrcb),
9068 &ioa_cfg->hostrcb_dma[i]);
9069
9070 if (!ioa_cfg->hostrcb[i])
9071 goto out_free_hostrcb_dma;
9072
9073 ioa_cfg->hostrcb[i]->hostrcb_dma =
9074 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9075 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9076 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9077 }
9078
0bc42e35 9079 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
9080 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9081
9082 if (!ioa_cfg->trace)
9083 goto out_free_hostrcb_dma;
9084
1da177e4
LT
9085 rc = 0;
9086out:
9087 LEAVE;
9088 return rc;
9089
9090out_free_hostrcb_dma:
9091 while (i-- > 0) {
9092 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9093 ioa_cfg->hostrcb[i],
9094 ioa_cfg->hostrcb_dma[i]);
9095 }
3e7ebdfa
WB
9096 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9097 ioa_cfg->u.cfg_table,
9098 ioa_cfg->cfg_table_dma);
1da177e4 9099out_free_host_rrq:
05a6538a 9100 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9101 pci_free_consistent(pdev,
9102 sizeof(u32) * ioa_cfg->hrrq[i].size,
9103 ioa_cfg->hrrq[i].host_rrq,
9104 ioa_cfg->hrrq[i].host_rrq_dma);
9105 }
1da177e4
LT
9106out_ipr_free_cmd_blocks:
9107 ipr_free_cmd_blks(ioa_cfg);
9108out_free_vpd_cbs:
9109 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9110 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9111out_free_res_entries:
9112 kfree(ioa_cfg->res_entries);
a2e49cb2
BK
9113 kfree(ioa_cfg->target_ids);
9114 kfree(ioa_cfg->array_ids);
9115 kfree(ioa_cfg->vset_ids);
1da177e4
LT
9116 goto out;
9117}
9118
9119/**
9120 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9121 * @ioa_cfg: ioa config struct
9122 *
9123 * Return value:
9124 * none
9125 **/
6f039790 9126static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9127{
9128 int i;
9129
9130 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9131 ioa_cfg->bus_attr[i].bus = i;
9132 ioa_cfg->bus_attr[i].qas_enabled = 0;
9133 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9134 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9135 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9136 else
9137 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9138 }
9139}
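/*
 * ipr_max_speed is a module parameter used as an index into the
 * ipr_max_bus_speeds[] table defined earlier in this file; the bounds
 * check above falls back to IPR_U160_SCSI_RATE for out-of-range
 * values. Assuming the parameter keeps its usual max_speed name, an
 * illustrative invocation would be:
 *
 *	modprobe ipr max_speed=1	// pick table entry 1 for every bus
 */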
9140
9141/**
9142 * ipr_init_ioa_cfg - Initialize IOA config struct
9143 * @ioa_cfg: ioa config struct
9144 * @host: scsi host struct
9145 * @pdev: PCI dev struct
9146 *
9147 * Return value:
9148 * none
9149 **/
6f039790
GKH
9150static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9151 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4
LT
9152{
9153 const struct ipr_interrupt_offsets *p;
9154 struct ipr_interrupts *t;
9155 void __iomem *base;
9156
9157 ioa_cfg->host = host;
9158 ioa_cfg->pdev = pdev;
9159 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9160 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9161 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9162 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9163 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9164 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9165 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9166 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9167
1da177e4
LT
9168 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9169 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9170 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9171 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9172 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9173 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9174 init_waitqueue_head(&ioa_cfg->msi_wait_q);
1da177e4
LT
9175 ioa_cfg->sdt_state = INACTIVE;
9176
9177 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9178 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9179
3e7ebdfa
WB
9180 if (ioa_cfg->sis64) {
9181 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9182 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9183 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9184 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9185 } else {
9186 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9187 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9188 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9189 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9190 }
1da177e4
LT
9191 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9192 host->unique_id = host->host_no;
9193 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9194 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9195 pci_set_drvdata(pdev, ioa_cfg);
9196
9197 p = &ioa_cfg->chip_cfg->regs;
9198 t = &ioa_cfg->regs;
9199 base = ioa_cfg->hdw_dma_regs;
9200
9201 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9202 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
214777ba 9203 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
1da177e4 9204 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
214777ba 9205 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
1da177e4 9206 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
214777ba 9207 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
1da177e4 9208 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
214777ba 9209 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
1da177e4
LT
9210 t->ioarrin_reg = base + p->ioarrin_reg;
9211 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
214777ba 9212 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
1da177e4 9213 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
214777ba 9214 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
1da177e4 9215 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
214777ba 9216 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
dcbad00e
WB
9217
9218 if (ioa_cfg->sis64) {
214777ba 9219 t->init_feedback_reg = base + p->init_feedback_reg;
dcbad00e
WB
9220 t->dump_addr_reg = base + p->dump_addr_reg;
9221 t->dump_data_reg = base + p->dump_data_reg;
8701f185 9222 t->endian_swap_reg = base + p->endian_swap_reg;
dcbad00e 9223 }
1da177e4
LT
9224}
9225
9226/**
1be7bd82 9227 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9228 * @dev_id: PCI device id struct
9229 *
9230 * Return value:
1be7bd82 9231 * ptr to chip information on success / NULL on failure
1da177e4 9232 **/
6f039790 9233static const struct ipr_chip_t *
1be7bd82 9234ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9235{
9236 int i;
9237
1da177e4
LT
9238 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9239 if (ipr_chip[i].vendor == dev_id->vendor &&
9240 ipr_chip[i].device == dev_id->device)
1be7bd82 9241 return &ipr_chip[i];
1da177e4
LT
9242 return NULL;
9243}
9244
05a6538a 9245static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9246{
9247 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9248 int i, err, vectors;
9249
9250 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9251 entries[i].entry = i;
9252
9253 vectors = ipr_number_of_msix;
9254
9255 while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9256 vectors = err;
9257
9258 if (err < 0) {
9259 pci_disable_msix(ioa_cfg->pdev);
9260 return err;
9261 }
9262
9263 if (!err) {
9264 for (i = 0; i < vectors; i++)
9265 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9266 ioa_cfg->nvectors = vectors;
9267 }
9268
9269 return err;
9270}
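/*
 * pci_enable_msix() returns 0 on success, a negative errno on hard
 * failure, or a positive count of vectors actually available when
 * fewer than requested can be allocated. The loop above keeps
 * shrinking the request to that count, e.g. (illustrative trace):
 *
 *	pci_enable_msix(pdev, entries, 16) -> 8	// only 8 available
 *	pci_enable_msix(pdev, entries, 8)  -> 0	// success, 8 enabled
 */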
9271
9272static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9273{
9274 int i, err, vectors;
9275
9276 vectors = ipr_number_of_msix;
9277
9278 while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9279 vectors = err;
9280
9281 if (err < 0) {
9282 pci_disable_msi(ioa_cfg->pdev);
9283 return err;
9284 }
9285
9286 if (!err) {
9287 for (i = 0; i < vectors; i++)
9288 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9289 ioa_cfg->nvectors = vectors;
9290 }
9291
9292 return err;
9293}
9294
9295static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9296{
9297 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9298
9299 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9300 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9301 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9302 ioa_cfg->vectors_info[vec_idx].desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9304 }
9305}
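/*
 * With illustrative values host_no == 2 and nvectors == 4, the
 * snprintf() above yields the per-vector names later handed to
 * request_irq(): "host2-0", "host2-1", "host2-2", "host2-3".
 */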
9306
9307static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9308{
9309 int i, rc;
9310
9311 for (i = 1; i < ioa_cfg->nvectors; i++) {
9312 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9313 ipr_isr_mhrrq,
9314 0,
9315 ioa_cfg->vectors_info[i].desc,
9316 &ioa_cfg->hrrq[i]);
9317 if (rc) {
9318 while (--i >= 0)
9319 free_irq(ioa_cfg->vectors_info[i].vec,
9320 &ioa_cfg->hrrq[i]);
9321 return rc;
9322 }
9323 }
9324 return 0;
9325}
9326
95fecd90
WB
9327/**
9328 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9329 * @irq: interrupt number
 * @devp: ioa config struct
9330 *
9331 * Description: Simply set the msi_received flag to 1, indicating that
9332 * Message Signaled Interrupts are supported.
9333 *
9334 * Return value:
9335 * IRQ_HANDLED
9336 **/
6f039790 9337static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
9338{
9339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9340 unsigned long lock_flags = 0;
9341 irqreturn_t rc = IRQ_HANDLED;
9342
05a6538a 9343 dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
95fecd90
WB
9344 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9345
9346 ioa_cfg->msi_received = 1;
9347 wake_up(&ioa_cfg->msi_wait_q);
9348
9349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9350 return rc;
9351}
9352
9353/**
9354 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9355 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
9356 *
9357 * Description: The return value from pci_enable_msi() cannot always be
9358 * trusted. This routine sets up and initiates a test interrupt to determine
9359 * whether the interrupt is received via the ipr_test_intr() service routine.
9360 * If the test fails, the driver will fall back to LSI.
9361 *
9362 * Return value:
9363 * 0 on success / non-zero on failure
9364 **/
6f039790 9365static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
9366{
9367 int rc;
9368 volatile u32 int_reg;
9369 unsigned long lock_flags = 0;
9370
9371 ENTER;
9372
9373 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9374 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9375 ioa_cfg->msi_received = 0;
9376 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 9377 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
9378 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9379 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9380
9381 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9382 if (rc) {
9383 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9384 return rc;
9385 } else if (ipr_debug)
9386 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9387
214777ba 9388 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
9389 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9390 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 9391 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
9392 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9393
95fecd90
WB
9394 if (!ioa_cfg->msi_received) {
9395 /* MSI test failed */
9396 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9397 rc = -EOPNOTSUPP;
9398 } else if (ipr_debug)
9399 dev_info(&pdev->dev, "MSI test succeeded.\n");
9400
9401 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9402
9403 free_irq(pdev->irq, ioa_cfg);
9404
9405 LEAVE;
9406
9407 return rc;
9408}
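/*
 * The test above is an instance of a common probe-time pattern: arm a
 * flag and a wait queue, trigger a single interrupt, then wait with a
 * timeout and inspect the flag. A generic sketch with hypothetical
 * names (not driver API):
 *
 *	drv->received = 0;
 *	trigger_test_irq(drv);		// device-specific doorbell poke
 *	wait_event_timeout(drv->wait_q, drv->received, HZ);
 *	if (!drv->received)
 *		use_legacy_irq(drv);	// fall back, as done here for LSI
 */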
9409
05a6538a 9410/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
1da177e4
LT
9411 * @pdev: PCI device struct
9412 * @dev_id: PCI device id struct
9413 *
9414 * Return value:
9415 * 0 on success / non-zero on failure
9416 **/
6f039790
GKH
9417static int ipr_probe_ioa(struct pci_dev *pdev,
9418 const struct pci_device_id *dev_id)
1da177e4
LT
9419{
9420 struct ipr_ioa_cfg *ioa_cfg;
9421 struct Scsi_Host *host;
9422 unsigned long ipr_regs_pci;
9423 void __iomem *ipr_regs;
a2a65a3e 9424 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 9425 volatile u32 mask, uproc, interrupts;
56d6aa33 9426 unsigned long lock_flags;
1da177e4
LT
9427
9428 ENTER;
9429
9430 if ((rc = pci_enable_device(pdev))) {
9431 dev_err(&pdev->dev, "Cannot enable adapter\n");
9432 goto out;
9433 }
9434
9435 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9436
9437 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9438
9439 if (!host) {
9440 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9441 rc = -ENOMEM;
9442 goto out_disable;
9443 }
9444
9445 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9446 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8d8e7d13 9447 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
1da177e4 9448
1be7bd82 9449 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 9450
1be7bd82 9451 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
9452 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9453 dev_id->vendor, dev_id->device);
 rc = -EINVAL;
9454 goto out_scsi_host_put;
9455 }
9456
a32c055f
WB
9457 /* set SIS 32 or SIS 64 */
9458 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82 9459 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7dd21308 9460 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
89aad428 9461 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
1be7bd82 9462
5469cb5b
BK
9463 if (ipr_transop_timeout)
9464 ioa_cfg->transop_timeout = ipr_transop_timeout;
9465 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9466 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9467 else
9468 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9469
44c10138 9470 ioa_cfg->revid = pdev->revision;
463fc696 9471
1da177e4
LT
9472 ipr_regs_pci = pci_resource_start(pdev, 0);
9473
9474 rc = pci_request_regions(pdev, IPR_NAME);
9475 if (rc < 0) {
9476 dev_err(&pdev->dev,
9477 "Couldn't register memory range of registers\n");
9478 goto out_scsi_host_put;
9479 }
9480
25729a7f 9481 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
9482
9483 if (!ipr_regs) {
9484 dev_err(&pdev->dev,
9485 "Couldn't map memory range of registers\n");
9486 rc = -ENOMEM;
9487 goto out_release_regions;
9488 }
9489
9490 ioa_cfg->hdw_dma_regs = ipr_regs;
9491 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9492 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9493
9494 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9495
9496 pci_set_master(pdev);
9497
a32c055f
WB
9498 if (ioa_cfg->sis64) {
9499 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9500 if (rc < 0) {
9501 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9502 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9503 }
9504
9505 } else
9506 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9507
1da177e4
LT
9508 if (rc < 0) {
9509 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9510 goto cleanup_nomem;
9511 }
9512
9513 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9514 ioa_cfg->chip_cfg->cache_line_size);
9515
9516 if (rc != PCIBIOS_SUCCESSFUL) {
9517 dev_err(&pdev->dev, "Write of cache line size failed\n");
9518 rc = -EIO;
9519 goto cleanup_nomem;
9520 }
9521
05a6538a 9522 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9523 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9524 IPR_MAX_MSIX_VECTORS);
9525 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9526 }
9527
9528 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9529 ipr_enable_msix(ioa_cfg) == 0)
05a6538a 9530 ioa_cfg->intr_flag = IPR_USE_MSIX;
9531 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9532 ipr_enable_msi(ioa_cfg) == 0)
05a6538a 9533 ioa_cfg->intr_flag = IPR_USE_MSI;
9534 else {
9535 ioa_cfg->intr_flag = IPR_USE_LSI;
9536 ioa_cfg->nvectors = 1;
9537 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9538 }
9539
9540 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9541 ioa_cfg->intr_flag == IPR_USE_MSIX) {
95fecd90 9542 rc = ipr_test_msi(ioa_cfg, pdev);
05a6538a 9543 if (rc == -EOPNOTSUPP) {
9544 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9545 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9546 pci_disable_msi(pdev);
9547 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9548 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9549 pci_disable_msix(pdev);
9550 }
9551
9552 ioa_cfg->intr_flag = IPR_USE_LSI;
9553 ioa_cfg->nvectors = 1;
9554 }
95fecd90
WB
9555 else if (rc)
9556 goto out_msi_disable;
05a6538a 9557 else {
9558 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9559 dev_info(&pdev->dev,
9560 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9561 ioa_cfg->nvectors, pdev->irq);
9562 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9563 dev_info(&pdev->dev,
9564 "Request for %d MSIXs succeeded.",
9565 ioa_cfg->nvectors);
9566 }
9567 }
9568
9569 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9570 (unsigned int)num_online_cpus(),
9571 (unsigned int)IPR_MAX_HRRQ_NUM);
95fecd90 9572
1da177e4
LT
9573 /* Save away PCI config space for use following IOA reset */
9574 rc = pci_save_state(pdev);
9575
9576 if (rc != PCIBIOS_SUCCESSFUL) {
9577 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9578 rc = -EIO;
f170c684 9579 goto out_msi_disable;
1da177e4
LT
9580 }
9581
9582 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
f170c684 9583 goto out_msi_disable;
1da177e4
LT
9584
9585 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
f170c684 9586 goto out_msi_disable;
1da177e4 9587
3e7ebdfa
WB
9588 if (ioa_cfg->sis64)
9589 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9590 + ((sizeof(struct ipr_config_table_entry64)
9591 * ioa_cfg->max_devs_supported)));
9592 else
9593 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9594 + ((sizeof(struct ipr_config_table_entry)
9595 * ioa_cfg->max_devs_supported)));
9596
1da177e4
LT
9597 rc = ipr_alloc_mem(ioa_cfg);
9598 if (rc < 0) {
9599 dev_err(&pdev->dev,
9600 "Couldn't allocate enough memory for device driver!\n");
f170c684 9601 goto out_msi_disable;
1da177e4
LT
9602 }
9603
ce155cce
BK
9604 /*
9605 * If HRRQ updated interrupt is not masked, or reset alert is set,
9606 * the card is in an unknown state and needs a hard reset
9607 */
214777ba
WB
9608 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9609 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9610 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce
BK
9611 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9612 ioa_cfg->needs_hard_reset = 1;
5d7c20b7 9613 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
473b1e8e
BK
9614 ioa_cfg->needs_hard_reset = 1;
9615 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9616 ioa_cfg->ioa_unit_checked = 1;
ce155cce 9617
56d6aa33 9618 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9619 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
56d6aa33 9620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9621
05a6538a 9622 if (ioa_cfg->intr_flag == IPR_USE_MSI
9623 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9624 name_msi_vectors(ioa_cfg);
9625 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9626 0,
9627 ioa_cfg->vectors_info[0].desc,
9628 &ioa_cfg->hrrq[0]);
9629 if (!rc)
9630 rc = ipr_request_other_msi_irqs(ioa_cfg);
9631 } else {
9632 rc = request_irq(pdev->irq, ipr_isr,
9633 IRQF_SHARED,
9634 IPR_NAME, &ioa_cfg->hrrq[0]);
9635 }
1da177e4
LT
9636 if (rc) {
9637 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9638 pdev->irq, rc);
9639 goto cleanup_nolog;
9640 }
9641
463fc696
BK
9642 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9643 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9644 ioa_cfg->needs_warm_reset = 1;
9645 ioa_cfg->reset = ipr_reset_slot_reset;
9646 } else
9647 ioa_cfg->reset = ipr_reset_start_bist;
9648
1da177e4
LT
9649 spin_lock(&ipr_driver_lock);
9650 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9651 spin_unlock(&ipr_driver_lock);
9652
9653 LEAVE;
9654out:
9655 return rc;
9656
9657cleanup_nolog:
9658 ipr_free_mem(ioa_cfg);
95fecd90 9659out_msi_disable:
05a6538a 9660 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9661 pci_disable_msi(pdev);
9662 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9663 pci_disable_msix(pdev);
f170c684
JL
9664cleanup_nomem:
9665 iounmap(ipr_regs);
1da177e4
LT
9666out_release_regions:
9667 pci_release_regions(pdev);
9668out_scsi_host_put:
9669 scsi_host_put(host);
9670out_disable:
9671 pci_disable_device(pdev);
9672 goto out;
9673}
9674
9675/**
9676 * ipr_scan_vsets - Scans for VSET devices
9677 * @ioa_cfg: ioa config struct
9678 *
9679 * Description: Since the VSET resources do not follow SAM (we can have
9680 * sparse LUNs with no LUN 0), we have to scan for these ourselves.
9681 *
9682 * Return value:
9683 * none
9684 **/
9685static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9686{
9687 int target, lun;
9688
9689 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
203fa3fe 9690 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
1da177e4
LT
9691 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9692}
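/*
 * Example: a VSET exposed only at LUN 3 of some target would be
 * missed by a scan that requires LUN 0 to respond first; the
 * exhaustive scsi_add_device() sweep above probes every
 * (target, lun) pair on IPR_VSET_BUS, so such sparse LUNs are found.
 */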
9693
9694/**
9695 * ipr_initiate_ioa_bringdown - Bring down an adapter
9696 * @ioa_cfg: ioa config struct
9697 * @shutdown_type: shutdown type
9698 *
9699 * Description: This function will initiate bringing down the adapter.
9700 * This consists of issuing an IOA shutdown to the adapter
9701 * to flush the cache, and running BIST.
9702 * If the caller needs to wait on the completion of the reset,
9703 * the caller must sleep on the reset_wait_q.
9704 *
9705 * Return value:
9706 * none
9707 **/
9708static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9709 enum ipr_shutdown_type shutdown_type)
9710{
9711 ENTER;
9712 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9713 ioa_cfg->sdt_state = ABORT_DUMP;
9714 ioa_cfg->reset_retries = 0;
9715 ioa_cfg->in_ioa_bringdown = 1;
9716 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9717 LEAVE;
9718}
9719
9720/**
9721 * __ipr_remove - Remove a single adapter
9722 * @pdev: pci device struct
9723 *
9724 * Adapter hot plug remove entry point.
9725 *
9726 * Return value:
9727 * none
9728 **/
9729static void __ipr_remove(struct pci_dev *pdev)
9730{
9731 unsigned long host_lock_flags = 0;
9732 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9733 ENTER;
9734
9735 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
203fa3fe 9736 while (ioa_cfg->in_reset_reload) {
970ea294
BK
9737 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9738 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9739 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9740 }
9741
1da177e4
LT
9742 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9743
9744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9745 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
43829731 9746 flush_work(&ioa_cfg->work_q);
1da177e4
LT
9747 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9748
9749 spin_lock(&ipr_driver_lock);
9750 list_del(&ioa_cfg->queue);
9751 spin_unlock(&ipr_driver_lock);
9752
9753 if (ioa_cfg->sdt_state == ABORT_DUMP)
9754 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9755 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9756
9757 ipr_free_all_resources(ioa_cfg);
9758
9759 LEAVE;
9760}
9761
9762/**
9763 * ipr_remove - IOA hot plug remove entry point
9764 * @pdev: pci device struct
9765 *
9766 * Adapter hot plug remove entry point.
9767 *
9768 * Return value:
9769 * none
9770 **/
6f039790 9771static void ipr_remove(struct pci_dev *pdev)
1da177e4
LT
9772{
9773 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9774
9775 ENTER;
9776
ee959b00 9777 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 9778 &ipr_trace_attr);
ee959b00 9779 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9780 &ipr_dump_attr);
9781 scsi_remove_host(ioa_cfg->host);
9782
9783 __ipr_remove(pdev);
9784
9785 LEAVE;
9786}
9787
9788/**
9789 * ipr_probe - Adapter hot plug add entry point
9790 *
9791 * Return value:
9792 * 0 on success / non-zero on failure
9793 **/
6f039790 9794static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1da177e4
LT
9795{
9796 struct ipr_ioa_cfg *ioa_cfg;
b53d124a 9797 int rc, i;
1da177e4
LT
9798
9799 rc = ipr_probe_ioa(pdev, dev_id);
9800
9801 if (rc)
9802 return rc;
9803
9804 ioa_cfg = pci_get_drvdata(pdev);
9805 rc = ipr_probe_ioa_part2(ioa_cfg);
9806
9807 if (rc) {
9808 __ipr_remove(pdev);
9809 return rc;
9810 }
9811
9812 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9813
9814 if (rc) {
9815 __ipr_remove(pdev);
9816 return rc;
9817 }
9818
ee959b00 9819 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9820 &ipr_trace_attr);
9821
9822 if (rc) {
9823 scsi_remove_host(ioa_cfg->host);
9824 __ipr_remove(pdev);
9825 return rc;
9826 }
9827
ee959b00 9828 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9829 &ipr_dump_attr);
9830
9831 if (rc) {
ee959b00 9832 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9833 &ipr_trace_attr);
9834 scsi_remove_host(ioa_cfg->host);
9835 __ipr_remove(pdev);
9836 return rc;
9837 }
9838
9839 scsi_scan_host(ioa_cfg->host);
9840 ipr_scan_vsets(ioa_cfg);
9841 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9842 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 9843 ioa_cfg->host->max_channel = IPR_VSET_BUS;
b53d124a 9844 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9845
9846 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9847 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9848 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9849 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9850 ioa_cfg->iopoll_weight, ipr_iopoll);
9851 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9852 }
9853 }
9854
1da177e4
LT
9855 schedule_work(&ioa_cfg->work_q);
9856 return 0;
9857}
9858
9859/**
9860 * ipr_shutdown - Shutdown handler.
d18c3db5 9861 * @pdev: pci device struct
1da177e4
LT
9862 *
9863 * This function is invoked upon system shutdown/reboot. It will issue
9864 * an adapter shutdown to the adapter to flush the write cache.
9865 *
9866 * Return value:
9867 * none
9868 **/
d18c3db5 9869static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 9870{
d18c3db5 9871 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4 9872 unsigned long lock_flags = 0;
b53d124a 9873 int i;
1da177e4
LT
9874
9875 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
b53d124a 9876 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9877 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9878 ioa_cfg->iopoll_weight = 0;
9879 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9880 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
9881 }
9882
203fa3fe 9883 while (ioa_cfg->in_reset_reload) {
970ea294
BK
9884 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9885 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9886 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9887 }
9888
1da177e4
LT
9889 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9890 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9891 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9892}
9893
6f039790 9894static struct pci_device_id ipr_pci_table[] = {
1da177e4 9895 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9896 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 9897 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9898 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 9899 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9900 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 9901 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9902 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 9903 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 9904 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 9905 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 9906 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 9907 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 9908 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 9909 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
9910 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9911 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 9912 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 9913 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 9914 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
9915 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9916 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 9917 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
9918 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9919 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 9920 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 9921 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 9922 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
9923 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9924 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 9925 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
9926 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9927 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 9928 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
9929 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9930 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
9931 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9932 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
b0f56d3d
WB
9933 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9934 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
60e7486b 9935 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 9936 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 9937 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 9938 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 9939 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 9940 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 9941 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 9942 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
9943 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9944 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 9945 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
9946 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9947 IPR_USE_LONG_TRANSOP_TIMEOUT },
d7b4627f
WB
9948 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9949 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9950 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9951 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9952 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9953 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
b8d5d568 9954 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9955 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
5a918353
WB
9956 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9957 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
32622bde
WB
9958 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9959 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
cd9b3d04 9960 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 9961 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
cd9b3d04 9962 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 9963 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
cd9b3d04 9964 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 9965 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
cd9b3d04
WB
9966 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9967 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9968 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 9969 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
b8d5d568 9970 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9971 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
9972 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9973 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
9974 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9975 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
9976 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9977 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
1da177e4
LT
9978 { }
9979};
9980MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9981
a55b2d21 9982static const struct pci_error_handlers ipr_err_handler = {
f8a88b19
LV
9983 .error_detected = ipr_pci_error_detected,
9984 .slot_reset = ipr_pci_slot_reset,
9985};
9986
1da177e4
LT
9987static struct pci_driver ipr_driver = {
9988 .name = IPR_NAME,
9989 .id_table = ipr_pci_table,
9990 .probe = ipr_probe,
6f039790 9991 .remove = ipr_remove,
d18c3db5 9992 .shutdown = ipr_shutdown,
f8a88b19 9993 .err_handler = &ipr_err_handler,
1da177e4
LT
9994};
9995
f72919ec
WB
9996/**
9997 * ipr_halt_done - Shutdown prepare completion
9998 *
9999 * Return value:
10000 * none
10001 **/
10002static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10003{
05a6538a 10004 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
f72919ec
WB
10005}
10006
10007/**
10008 * ipr_halt - Issue shutdown prepare to all adapters
10009 *
10010 * Return value:
10011 * NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
10012 **/
10013static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10014{
10015 struct ipr_cmnd *ipr_cmd;
10016 struct ipr_ioa_cfg *ioa_cfg;
10017 unsigned long flags = 0;
10018
10019 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10020 return NOTIFY_DONE;
10021
10022 spin_lock(&ipr_driver_lock);
10023
10024 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10025 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
56d6aa33 10026 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
f72919ec
WB
10027 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10028 continue;
10029 }
10030
10031 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10032 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10033 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10034 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10035 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10036
10037 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10038 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10039 }
10040 spin_unlock(&ipr_driver_lock);
10041
10042 return NOTIFY_OK;
10043}
10044
10045static struct notifier_block ipr_notifier = {
10046 .notifier_call = ipr_halt,
10047};
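/*
 * The notifier_block above hooks ipr_halt() into the reboot path;
 * ipr_init() registers it with register_reboot_notifier() so every
 * adapter receives IPR_SHUTDOWN_PREPARE_FOR_NORMAL before the system
 * goes down. A minimal sketch of the same pattern (hypothetical
 * callback, not driver code):
 *
 *	static int my_halt(struct notifier_block *nb, ulong event, void *buf)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_halt,
 *	};
 *
 *	register_reboot_notifier(&my_notifier);		// module init
 *	unregister_reboot_notifier(&my_notifier);	// module exit
 */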
10048
1da177e4
LT
10049/**
10050 * ipr_init - Module entry point
10051 *
10052 * Return value:
10053 * 0 on success / negative value on failure
10054 **/
10055static int __init ipr_init(void)
10056{
10057 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10058 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10059
f72919ec 10060 register_reboot_notifier(&ipr_notifier);
dcbccbde 10061 return pci_register_driver(&ipr_driver);
1da177e4
LT
10062}
10063
10064/**
10065 * ipr_exit - Module unload
10066 *
10067 * Module unload entry point.
10068 *
10069 * Return value:
10070 * none
10071 **/
10072static void __exit ipr_exit(void)
10073{
f72919ec 10074 unregister_reboot_notifier(&ipr_notifier);
1da177e4
LT
10075 pci_unregister_driver(&ipr_driver);
10076}
10077
10078module_init(ipr_init);
10079module_exit(ipr_exit);