/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

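/*
 * Illustrative usage (added commentary, not part of the original source):
 * the module parameters declared above can be overridden at load time. A
 * hypothetical invocation such as
 *
 *   modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * would select U320 bus speed, raise the error-log verbosity, and request
 * four MSI-X vectors on adapters that support them.
 */
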
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

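/*
 * Added commentary (not in the original source): the trace buffer used by
 * ipr_trc_hook() above is a fixed-size ring. atomic_add_return() advances a
 * shared index and the modulo by IPR_NUM_TRACE_ENTRIES wraps it, so once the
 * buffer fills the newest entries simply overwrite the oldest ones.
 */
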
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function to invoke on completion
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	host response ring queue to take the command block from
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	else
		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

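/*
 * Added commentary (not in the original source): HRRQ 0 (IPR_INIT_HRRQ) is
 * the queue used for driver-internal commands such as the HCAMs sent below,
 * while ipr_get_hrrq_index() above spreads normal I/O round-robin across the
 * remaining queues (indexes 1 .. hrrq_num - 1).
 */
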
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}

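/*
 * Added commentary (not in the original source): for a SIS64 adapter the two
 * helpers above produce strings such as "00-0A-01" from
 * __ipr_format_res_path() and "2/00-0A-01" from ipr_format_res_path(), where
 * the leading "2" is the SCSI host number and each hex pair is one element of
 * the resource path. The values in this example are hypothetical.
 */
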
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

1378/**
1379 * ipr_handle_config_change - Handle a config change from the adapter
1380 * @ioa_cfg: ioa config struct
1381 * @hostrcb: hostrcb
1382 *
1383 * Return value:
1384 * none
1385 **/
1386static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1387 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1388{
1389 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1390 struct ipr_config_table_entry_wrapper cfgtew;
1391 __be32 cc_res_handle;
1392
1da177e4
LT
1393 u32 is_ndn = 1;
1394
3e7ebdfa
WB
1395 if (ioa_cfg->sis64) {
1396 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1397 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1398 } else {
1399 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1400 cc_res_handle = cfgtew.u.cfgte->res_handle;
1401 }
1da177e4
LT
1402
1403 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1404 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1405 is_ndn = 0;
1406 break;
1407 }
1408 }
1409
1410 if (is_ndn) {
1411 if (list_empty(&ioa_cfg->free_res_q)) {
1412 ipr_send_hcam(ioa_cfg,
1413 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1414 hostrcb);
1415 return;
1416 }
1417
1418 res = list_entry(ioa_cfg->free_res_q.next,
1419 struct ipr_resource_entry, queue);
1420
1421 list_del(&res->queue);
3e7ebdfa 1422 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1423 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1424 }
1425
3e7ebdfa 1426 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1427
1428 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1429 if (res->sdev) {
1da177e4 1430 res->del_from_ml = 1;
3e7ebdfa 1431 res->res_handle = IPR_INVALID_RES_HANDLE;
f688f96d 1432 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1433 } else {
1434 ipr_clear_res_target(res);
1da177e4 1435 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1436 }
5767a1c4 1437 } else if (!res->sdev || res->del_from_ml) {
1da177e4 1438 res->add_to_ml = 1;
f688f96d 1439 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
1440 }
1441
1442 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1443}
1444
1445/**
1446 * ipr_process_ccn - Op done function for a CCN.
1447 * @ipr_cmd: ipr command struct
1448 *
1449 * This function is the op done function for a configuration
1450 * change notification host controlled async from the adapter.
1451 *
1452 * Return value:
1453 * none
1454 **/
1455static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1456{
1457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1458 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1459 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
1460
1461 list_del(&hostrcb->queue);
05a6538a 1462 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
1463
1464 if (ioasc) {
1465 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1466 dev_err(&ioa_cfg->pdev->dev,
1467 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1468
1469 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1470 } else {
1471 ipr_handle_config_change(ioa_cfg, hostrcb);
1472 }
1473}
1474
8cf093e2
BK
1475/**
1476 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1477 * @i: index into buffer
1478 * @buf: string to modify
1479 *
1480 * This function will strip all trailing whitespace, pad the end
1481 * of the string with a single space, and NULL terminate the string.
1482 *
1483 * Return value:
1484 * new length of string
1485 **/
1486static int strip_and_pad_whitespace(int i, char *buf)
1487{
1488 while (i && buf[i] == ' ')
1489 i--;
1490 buf[i+1] = ' ';
1491 buf[i+2] = '\0';
1492 return i + 2;
1493}
1494
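/*
 * ipr_log_vpd_compact() below builds a single "VENDOR PRODUCT SERIAL"
 * string from the fixed-width inquiry fields, using
 * strip_and_pad_whitespace() above to collapse each field's trailing
 * blanks into one separating space (a hypothetical entry might log as
 * "IBM 2780001 0000000012345").
 */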
1495/**
1496 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1497 * @prefix: string to print at start of printk
1498 * @hostrcb: hostrcb pointer
1499 * @vpd: vendor/product id/sn struct
1500 *
1501 * Return value:
1502 * none
1503 **/
1504static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1505 struct ipr_vpd *vpd)
1506{
1507 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1508 int i = 0;
1509
1510 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1511 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1512
1513 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1514 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1515
1516 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1517 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1518
1519 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1520}
1521
1522/**
1523 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1524 * @vpd: vendor/product id/sn struct
1525 *
1526 * Return value:
1527 * none
1528 **/
cfc32139 1529static void ipr_log_vpd(struct ipr_vpd *vpd)
1530{
1531 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1532 + IPR_SERIAL_NUM_LEN];
1533
1534 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1535 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1536 IPR_PROD_ID_LEN);
1537 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1538 ipr_err("Vendor/Product ID: %s\n", buffer);
1539
cfc32139 1540 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1541 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1542 ipr_err(" Serial Number: %s\n", buffer);
1543}
1544
1545/**
1546 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1547 * @prefix: string to print at start of printk
1548 * @hostrcb: hostrcb pointer
1549 * @vpd: vendor/product id/sn/wwn struct
1550 *
1551 * Return value:
1552 * none
1553 **/
1554static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1555 struct ipr_ext_vpd *vpd)
1556{
1557 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1558 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1559 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1560}
1561
1562/**
1563 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1564 * @vpd: vendor/product id/sn/wwn struct
1565 *
1566 * Return value:
1567 * none
1568 **/
1569static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1570{
1571 ipr_log_vpd(&vpd->vpd);
1572 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1573 be32_to_cpu(vpd->wwid[1]));
1574}
1575
1576/**
1577 * ipr_log_enhanced_cache_error - Log a cache error.
1578 * @ioa_cfg: ioa config struct
1579 * @hostrcb: hostrcb struct
1580 *
1581 * Return value:
1582 * none
1583 **/
1584static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1585 struct ipr_hostrcb *hostrcb)
1586{
1587 struct ipr_hostrcb_type_12_error *error;
1588
1589 if (ioa_cfg->sis64)
1590 error = &hostrcb->hcam.u.error64.u.type_12_error;
1591 else
1592 error = &hostrcb->hcam.u.error.u.type_12_error;
1593
1594 ipr_err("-----Current Configuration-----\n");
1595 ipr_err("Cache Directory Card Information:\n");
1596 ipr_log_ext_vpd(&error->ioa_vpd);
1597 ipr_err("Adapter Card Information:\n");
1598 ipr_log_ext_vpd(&error->cfc_vpd);
1599
1600 ipr_err("-----Expected Configuration-----\n");
1601 ipr_err("Cache Directory Card Information:\n");
1602 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1603 ipr_err("Adapter Card Information:\n");
1604 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1605
1606 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1607 be32_to_cpu(error->ioa_data[0]),
1608 be32_to_cpu(error->ioa_data[1]),
1609 be32_to_cpu(error->ioa_data[2]));
1610}
1611
1612/**
1613 * ipr_log_cache_error - Log a cache error.
1614 * @ioa_cfg: ioa config struct
1615 * @hostrcb: hostrcb struct
1616 *
1617 * Return value:
1618 * none
1619 **/
1620static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1621 struct ipr_hostrcb *hostrcb)
1622{
1623 struct ipr_hostrcb_type_02_error *error =
1624 &hostrcb->hcam.u.error.u.type_02_error;
1625
1626 ipr_err("-----Current Configuration-----\n");
1627 ipr_err("Cache Directory Card Information:\n");
cfc32139 1628 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1629 ipr_err("Adapter Card Information:\n");
cfc32139 1630 ipr_log_vpd(&error->cfc_vpd);
1631
1632 ipr_err("-----Expected Configuration-----\n");
1633 ipr_err("Cache Directory Card Information:\n");
cfc32139 1634 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1635 ipr_err("Adapter Card Information:\n");
cfc32139 1636 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1637
1638 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1639 be32_to_cpu(error->ioa_data[0]),
1640 be32_to_cpu(error->ioa_data[1]),
1641 be32_to_cpu(error->ioa_data[2]));
1642}
1643
1644/**
1645 * ipr_log_enhanced_config_error - Log a configuration error.
1646 * @ioa_cfg: ioa config struct
1647 * @hostrcb: hostrcb struct
1648 *
1649 * Return value:
1650 * none
1651 **/
1652static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1653 struct ipr_hostrcb *hostrcb)
1654{
1655 int errors_logged, i;
1656 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1657 struct ipr_hostrcb_type_13_error *error;
1658
1659 error = &hostrcb->hcam.u.error.u.type_13_error;
1660 errors_logged = be32_to_cpu(error->errors_logged);
1661
1662 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1663 be32_to_cpu(error->errors_detected), errors_logged);
1664
1665 dev_entry = error->dev;
1666
1667 for (i = 0; i < errors_logged; i++, dev_entry++) {
1668 ipr_err_separator;
1669
1670 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1671 ipr_log_ext_vpd(&dev_entry->vpd);
1672
1673 ipr_err("-----New Device Information-----\n");
1674 ipr_log_ext_vpd(&dev_entry->new_vpd);
1675
1676 ipr_err("Cache Directory Card Information:\n");
1677 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1678
1679 ipr_err("Adapter Card Information:\n");
1680 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1681 }
1682}
1683
1684/**
1685 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1686 * @ioa_cfg: ioa config struct
1687 * @hostrcb: hostrcb struct
1688 *
1689 * Return value:
1690 * none
1691 **/
1692static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1693 struct ipr_hostrcb *hostrcb)
1694{
1695 int errors_logged, i;
1696 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1697 struct ipr_hostrcb_type_23_error *error;
1698 char buffer[IPR_MAX_RES_PATH_LENGTH];
1699
1700 error = &hostrcb->hcam.u.error64.u.type_23_error;
1701 errors_logged = be32_to_cpu(error->errors_logged);
1702
1703 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1704 be32_to_cpu(error->errors_detected), errors_logged);
1705
1706 dev_entry = error->dev;
1707
1708 for (i = 0; i < errors_logged; i++, dev_entry++) {
1709 ipr_err_separator;
1710
1711 ipr_err("Device %d : %s", i + 1,
1712 __ipr_format_res_path(dev_entry->res_path,
1713 buffer, sizeof(buffer)));
1714 ipr_log_ext_vpd(&dev_entry->vpd);
1715
1716 ipr_err("-----New Device Information-----\n");
1717 ipr_log_ext_vpd(&dev_entry->new_vpd);
1718
1719 ipr_err("Cache Directory Card Information:\n");
1720 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1721
1722 ipr_err("Adapter Card Information:\n");
1723 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1724 }
1725}
1726
1727/**
1728 * ipr_log_config_error - Log a configuration error.
1729 * @ioa_cfg: ioa config struct
1730 * @hostrcb: hostrcb struct
1731 *
1732 * Return value:
1733 * none
1734 **/
1735static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1736 struct ipr_hostrcb *hostrcb)
1737{
1738 int errors_logged, i;
1739 struct ipr_hostrcb_device_data_entry *dev_entry;
1740 struct ipr_hostrcb_type_03_error *error;
1741
1742 error = &hostrcb->hcam.u.error.u.type_03_error;
1743 errors_logged = be32_to_cpu(error->errors_logged);
1744
1745 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1746 be32_to_cpu(error->errors_detected), errors_logged);
1747
cfc32139 1748 dev_entry = error->dev;
1749
1750 for (i = 0; i < errors_logged; i++, dev_entry++) {
1751 ipr_err_separator;
1752
fa15b1f6 1753 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1754 ipr_log_vpd(&dev_entry->vpd);
1755
1756 ipr_err("-----New Device Information-----\n");
cfc32139 1757 ipr_log_vpd(&dev_entry->new_vpd);
1758
1759 ipr_err("Cache Directory Card Information:\n");
cfc32139 1760 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1761
1762 ipr_err("Adapter Card Information:\n");
cfc32139 1763 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1764
1765 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1766 be32_to_cpu(dev_entry->ioa_data[0]),
1767 be32_to_cpu(dev_entry->ioa_data[1]),
1768 be32_to_cpu(dev_entry->ioa_data[2]),
1769 be32_to_cpu(dev_entry->ioa_data[3]),
1770 be32_to_cpu(dev_entry->ioa_data[4]));
1771 }
1772}
1773
1774/**
1775 * ipr_log_enhanced_array_error - Log an array configuration error.
1776 * @ioa_cfg: ioa config struct
1777 * @hostrcb: hostrcb struct
1778 *
1779 * Return value:
1780 * none
1781 **/
1782static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1783 struct ipr_hostrcb *hostrcb)
1784{
1785 int i, num_entries;
1786 struct ipr_hostrcb_type_14_error *error;
1787 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1788 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1789
1790 error = &hostrcb->hcam.u.error.u.type_14_error;
1791
1792 ipr_err_separator;
1793
1794 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1795 error->protection_level,
1796 ioa_cfg->host->host_no,
1797 error->last_func_vset_res_addr.bus,
1798 error->last_func_vset_res_addr.target,
1799 error->last_func_vset_res_addr.lun);
1800
1801 ipr_err_separator;
1802
1803 array_entry = error->array_member;
1804 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1805 ARRAY_SIZE(error->array_member));
1806
1807 for (i = 0; i < num_entries; i++, array_entry++) {
1808 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1809 continue;
1810
1811 if (be32_to_cpu(error->exposed_mode_adn) == i)
1812 ipr_err("Exposed Array Member %d:\n", i);
1813 else
1814 ipr_err("Array Member %d:\n", i);
1815
1816 ipr_log_ext_vpd(&array_entry->vpd);
1817 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1818 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1819 "Expected Location");
1820
1821 ipr_err_separator;
1822 }
1823}
1824
1825/**
1826 * ipr_log_array_error - Log an array configuration error.
1827 * @ioa_cfg: ioa config struct
1828 * @hostrcb: hostrcb struct
1829 *
1830 * Return value:
1831 * none
1832 **/
1833static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1834 struct ipr_hostrcb *hostrcb)
1835{
1836 int i;
1837 struct ipr_hostrcb_type_04_error *error;
1838 struct ipr_hostrcb_array_data_entry *array_entry;
1839 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1840
1841 error = &hostrcb->hcam.u.error.u.type_04_error;
1842
1843 ipr_err_separator;
1844
1845 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1846 error->protection_level,
1847 ioa_cfg->host->host_no,
1848 error->last_func_vset_res_addr.bus,
1849 error->last_func_vset_res_addr.target,
1850 error->last_func_vset_res_addr.lun);
1851
1852 ipr_err_separator;
1853
1854 array_entry = error->array_member;
1855
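	/*
	 * The legacy type 04 overlay splits its 18 array member slots across
	 * two fixed arrays, so after logging entry 9 the walk continues in
	 * error->array_member2 instead of advancing the first array.
	 */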
1856 for (i = 0; i < 18; i++) {
cfc32139 1857 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1858 continue;
1859
fa15b1f6 1860 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1861 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1862 else
1da177e4 1863 ipr_err("Array Member %d:\n", i);
1da177e4 1864
cfc32139 1865 ipr_log_vpd(&array_entry->vpd);
1da177e4 1866
1867 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1868 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1869 "Expected Location");
1870
1871 ipr_err_separator;
1872
1873 if (i == 9)
1874 array_entry = error->array_member2;
1875 else
1876 array_entry++;
1877 }
1878}
1879
1880/**
b0df54bb 1881 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1882 * @ioa_cfg: ioa config struct
1883 * @data: IOA error data
1884 * @len: data length
1885 *
1886 * Return value:
1887 * none
1888 **/
ac719aba 1889static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1890{
1891 int i;
1da177e4 1892
b0df54bb 1893 if (len == 0)
1894 return;
1895
1896 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1897 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1898
b0df54bb 1899 for (i = 0; i < len / 4; i += 4) {
1da177e4 1900 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1901 be32_to_cpu(data[i]),
1902 be32_to_cpu(data[i+1]),
1903 be32_to_cpu(data[i+2]),
1904 be32_to_cpu(data[i+3]));
1905 }
1906}
1907
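/*
 * Each line ipr_log_hex_data() emits is "<byte offset>: <four 32-bit
 * big-endian words>", so the second line of a dump would read something
 * like "00000010: DEADBEEF 00000000 00000001 0000002A" (values purely
 * illustrative). Dumps are clamped to IPR_DEFAULT_MAX_ERROR_DUMP unless
 * the adapter's log level has been raised above the default.
 */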
1908/**
1909 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1910 * @ioa_cfg: ioa config struct
1911 * @hostrcb: hostrcb struct
1912 *
1913 * Return value:
1914 * none
1915 **/
1916static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1917 struct ipr_hostrcb *hostrcb)
1918{
1919 struct ipr_hostrcb_type_17_error *error;
1920
1921 if (ioa_cfg->sis64)
1922 error = &hostrcb->hcam.u.error64.u.type_17_error;
1923 else
1924 error = &hostrcb->hcam.u.error.u.type_17_error;
1925
ee0f05b8 1926 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1927 strim(error->failure_reason);
ee0f05b8 1928
1929 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1930 be32_to_cpu(hostrcb->hcam.u.error.prc));
1931 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1932 ipr_log_hex_data(ioa_cfg, error->data,
1933 be32_to_cpu(hostrcb->hcam.length) -
1934 (offsetof(struct ipr_hostrcb_error, u) +
1935 offsetof(struct ipr_hostrcb_type_17_error, data)));
1936}
1937
1938/**
1939 * ipr_log_dual_ioa_error - Log a dual adapter error.
1940 * @ioa_cfg: ioa config struct
1941 * @hostrcb: hostrcb struct
1942 *
1943 * Return value:
1944 * none
1945 **/
1946static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1947 struct ipr_hostrcb *hostrcb)
1948{
1949 struct ipr_hostrcb_type_07_error *error;
1950
1951 error = &hostrcb->hcam.u.error.u.type_07_error;
1952 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1953 strim(error->failure_reason);
b0df54bb 1954
1955 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1956 be32_to_cpu(hostrcb->hcam.u.error.prc));
1957 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1958 ipr_log_hex_data(ioa_cfg, error->data,
1959 be32_to_cpu(hostrcb->hcam.length) -
1960 (offsetof(struct ipr_hostrcb_error, u) +
1961 offsetof(struct ipr_hostrcb_type_07_error, data)));
1962}
1963
1964static const struct {
1965 u8 active;
1966 char *desc;
1967} path_active_desc[] = {
1968 { IPR_PATH_NO_INFO, "Path" },
1969 { IPR_PATH_ACTIVE, "Active path" },
1970 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1971};
1972
1973static const struct {
1974 u8 state;
1975 char *desc;
1976} path_state_desc[] = {
1977 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1978 { IPR_PATH_HEALTHY, "is healthy" },
1979 { IPR_PATH_DEGRADED, "is degraded" },
1980 { IPR_PATH_FAILED, "is failed" }
1981};
1982
1983/**
1984 * ipr_log_fabric_path - Log a fabric path error
1985 * @hostrcb: hostrcb struct
1986 * @fabric: fabric descriptor
1987 *
1988 * Return value:
1989 * none
1990 **/
1991static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1992 struct ipr_hostrcb_fabric_desc *fabric)
1993{
1994 int i, j;
1995 u8 path_state = fabric->path_state;
1996 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1997 u8 state = path_state & IPR_PATH_STATE_MASK;
1998
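	/*
	 * A cascaded_expander or phy value of 0xff means that field was not
	 * reported for this path, so the matching message below simply
	 * omits it.
	 */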
1999 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2000 if (path_active_desc[i].active != active)
2001 continue;
2002
2003 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2004 if (path_state_desc[j].state != state)
2005 continue;
2006
2007 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2008 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2009 path_active_desc[i].desc, path_state_desc[j].desc,
2010 fabric->ioa_port);
2011 } else if (fabric->cascaded_expander == 0xff) {
2012 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2013 path_active_desc[i].desc, path_state_desc[j].desc,
2014 fabric->ioa_port, fabric->phy);
2015 } else if (fabric->phy == 0xff) {
2016 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2017 path_active_desc[i].desc, path_state_desc[j].desc,
2018 fabric->ioa_port, fabric->cascaded_expander);
2019 } else {
2020 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2021 path_active_desc[i].desc, path_state_desc[j].desc,
2022 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2023 }
2024 return;
2025 }
2026 }
2027
2028 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2029 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2030}
2031
2032/**
2033 * ipr_log64_fabric_path - Log a fabric path error
2034 * @hostrcb: hostrcb struct
2035 * @fabric: fabric descriptor
2036 *
2037 * Return value:
2038 * none
2039 **/
2040static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2041 struct ipr_hostrcb64_fabric_desc *fabric)
2042{
2043 int i, j;
2044 u8 path_state = fabric->path_state;
2045 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2046 u8 state = path_state & IPR_PATH_STATE_MASK;
2047 char buffer[IPR_MAX_RES_PATH_LENGTH];
2048
2049 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2050 if (path_active_desc[i].active != active)
2051 continue;
2052
2053 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2054 if (path_state_desc[j].state != state)
2055 continue;
2056
2057 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2058 path_active_desc[i].desc, path_state_desc[j].desc,
2059 ipr_format_res_path(hostrcb->ioa_cfg,
2060 fabric->res_path,
2061 buffer, sizeof(buffer)));
2062 return;
2063 }
2064 }
2065
2066 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2067 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2068 buffer, sizeof(buffer)));
2069}
2070
2071static const struct {
2072 u8 type;
2073 char *desc;
2074} path_type_desc[] = {
2075 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2076 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2077 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2078 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2079};
2080
2081static const struct {
2082 u8 status;
2083 char *desc;
2084} path_status_desc[] = {
2085 { IPR_PATH_CFG_NO_PROB, "Functional" },
2086 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2087 { IPR_PATH_CFG_FAILED, "Failed" },
2088 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2089 { IPR_PATH_NOT_DETECTED, "Missing" },
2090 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2091};
2092
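/*
 * Text for the 4-bit negotiated link rate carried in each path element;
 * indices 8 and 9 are the 1.5 and 3.0 Gbps SAS rates, the low values are
 * special states (disabled, spinup hold, ...) and the rest are reserved.
 */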
2093static const char *link_rate[] = {
2094 "unknown",
2095 "disabled",
2096 "phy reset problem",
2097 "spinup hold",
2098 "port selector",
2099 "unknown",
2100 "unknown",
2101 "unknown",
2102 "1.5Gbps",
2103 "3.0Gbps",
2104 "unknown",
2105 "unknown",
2106 "unknown",
2107 "unknown",
2108 "unknown",
2109 "unknown"
2110};
2111
2112/**
2113 * ipr_log_path_elem - Log a fabric path element.
2114 * @hostrcb: hostrcb struct
2115 * @cfg: fabric path element struct
2116 *
2117 * Return value:
2118 * none
2119 **/
2120static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2121 struct ipr_hostrcb_config_element *cfg)
2122{
2123 int i, j;
2124 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2125 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2126
2127 if (type == IPR_PATH_CFG_NOT_EXIST)
2128 return;
2129
2130 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2131 if (path_type_desc[i].type != type)
2132 continue;
2133
2134 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2135 if (path_status_desc[j].status != status)
2136 continue;
2137
2138 if (type == IPR_PATH_CFG_IOA_PORT) {
2139 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2140 path_status_desc[j].desc, path_type_desc[i].desc,
2141 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2142 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2143 } else {
2144 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2145 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2146 path_status_desc[j].desc, path_type_desc[i].desc,
2147 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2148 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2149 } else if (cfg->cascaded_expander == 0xff) {
2150 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2151 "WWN=%08X%08X\n", path_status_desc[j].desc,
2152 path_type_desc[i].desc, cfg->phy,
2153 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2154 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2155 } else if (cfg->phy == 0xff) {
2156 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2157 "WWN=%08X%08X\n", path_status_desc[j].desc,
2158 path_type_desc[i].desc, cfg->cascaded_expander,
2159 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2160 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2161 } else {
2162 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2163 "WWN=%08X%08X\n", path_status_desc[j].desc,
2164 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2165 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2166 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2167 }
2168 }
2169 return;
2170 }
2171 }
2172
2173 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2174 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2175 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2176 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2177}
2178
2179/**
2180 * ipr_log64_path_elem - Log a fabric path element.
2181 * @hostrcb: hostrcb struct
2182 * @cfg: fabric path element struct
2183 *
2184 * Return value:
2185 * none
2186 **/
2187static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2188 struct ipr_hostrcb64_config_element *cfg)
2189{
2190 int i, j;
2191 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2192 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2193 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2194 char buffer[IPR_MAX_RES_PATH_LENGTH];
2195
2196 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2197 return;
2198
2199 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2200 if (path_type_desc[i].type != type)
2201 continue;
2202
2203 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2204 if (path_status_desc[j].status != status)
2205 continue;
2206
2207 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2208 path_status_desc[j].desc, path_type_desc[i].desc,
2209 ipr_format_res_path(hostrcb->ioa_cfg,
2210 cfg->res_path, buffer, sizeof(buffer)),
2211 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212 be32_to_cpu(cfg->wwid[0]),
2213 be32_to_cpu(cfg->wwid[1]));
2214 return;
2215 }
2216 }
2217 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2218 "WWN=%08X%08X\n", cfg->type_status,
2219 ipr_format_res_path(hostrcb->ioa_cfg,
2220 cfg->res_path, buffer, sizeof(buffer)),
2221 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2222 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2223}
2224
2225/**
2226 * ipr_log_fabric_error - Log a fabric error.
2227 * @ioa_cfg: ioa config struct
2228 * @hostrcb: hostrcb struct
2229 *
2230 * Return value:
2231 * none
2232 **/
2233static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2234 struct ipr_hostrcb *hostrcb)
2235{
2236 struct ipr_hostrcb_type_20_error *error;
2237 struct ipr_hostrcb_fabric_desc *fabric;
2238 struct ipr_hostrcb_config_element *cfg;
2239 int i, add_len;
2240
2241 error = &hostrcb->hcam.u.error.u.type_20_error;
2242 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2243 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2244
2245 add_len = be32_to_cpu(hostrcb->hcam.length) -
2246 (offsetof(struct ipr_hostrcb_error, u) +
2247 offsetof(struct ipr_hostrcb_type_20_error, desc));
2248
2249 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2250 ipr_log_fabric_path(hostrcb, fabric);
2251 for_each_fabric_cfg(fabric, cfg)
2252 ipr_log_path_elem(hostrcb, cfg);
2253
2254 add_len -= be16_to_cpu(fabric->length);
2255 fabric = (struct ipr_hostrcb_fabric_desc *)
2256 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2257 }
2258
ac719aba 2259 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2260}
2261
2262/**
2263 * ipr_log_sis64_array_error - Log a sis64 array error.
2264 * @ioa_cfg: ioa config struct
2265 * @hostrcb: hostrcb struct
2266 *
2267 * Return value:
2268 * none
2269 **/
2270static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2271 struct ipr_hostrcb *hostrcb)
2272{
2273 int i, num_entries;
2274 struct ipr_hostrcb_type_24_error *error;
2275 struct ipr_hostrcb64_array_data_entry *array_entry;
2276 char buffer[IPR_MAX_RES_PATH_LENGTH];
2277 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2278
2279 error = &hostrcb->hcam.u.error64.u.type_24_error;
2280
2281 ipr_err_separator;
2282
2283 ipr_err("RAID %s Array Configuration: %s\n",
2284 error->protection_level,
2285 ipr_format_res_path(ioa_cfg, error->last_res_path,
2286 buffer, sizeof(buffer)));
2287
2288 ipr_err_separator;
2289
2290 array_entry = error->array_member;
2291 num_entries = min_t(u32, error->num_entries,
2292 ARRAY_SIZE(error->array_member));
2293
2294 for (i = 0; i < num_entries; i++, array_entry++) {
2295
2296 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2297 continue;
2298
2299 if (error->exposed_mode_adn == i)
2300 ipr_err("Exposed Array Member %d:\n", i);
2301 else
2302 ipr_err("Array Member %d:\n", i);
2303
2305 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2306 ipr_err("Current Location: %s\n",
2307 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2308 buffer, sizeof(buffer)));
7262026f 2309 ipr_err("Expected Location: %s\n",
2310 ipr_format_res_path(ioa_cfg,
2311 array_entry->expected_res_path,
2312 buffer, sizeof(buffer)));
2313
2314 ipr_err_separator;
2315 }
2316}
2317
2318/**
2319 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2320 * @ioa_cfg: ioa config struct
2321 * @hostrcb: hostrcb struct
2322 *
2323 * Return value:
2324 * none
2325 **/
2326static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2327 struct ipr_hostrcb *hostrcb)
2328{
2329 struct ipr_hostrcb_type_30_error *error;
2330 struct ipr_hostrcb64_fabric_desc *fabric;
2331 struct ipr_hostrcb64_config_element *cfg;
2332 int i, add_len;
2333
2334 error = &hostrcb->hcam.u.error64.u.type_30_error;
2335
2336 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2337 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2338
2339 add_len = be32_to_cpu(hostrcb->hcam.length) -
2340 (offsetof(struct ipr_hostrcb64_error, u) +
2341 offsetof(struct ipr_hostrcb_type_30_error, desc));
2342
2343 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2344 ipr_log64_fabric_path(hostrcb, fabric);
2345 for_each_fabric_cfg(fabric, cfg)
2346 ipr_log64_path_elem(hostrcb, cfg);
2347
2348 add_len -= be16_to_cpu(fabric->length);
2349 fabric = (struct ipr_hostrcb64_fabric_desc *)
2350 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2351 }
2352
2353 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2354}
2355
2356/**
2357 * ipr_log_generic_error - Log an adapter error.
2358 * @ioa_cfg: ioa config struct
2359 * @hostrcb: hostrcb struct
2360 *
2361 * Return value:
2362 * none
2363 **/
2364static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2365 struct ipr_hostrcb *hostrcb)
2366{
ac719aba 2367 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2368 be32_to_cpu(hostrcb->hcam.length));
2369}
2370
2371/**
2372 * ipr_log_sis64_device_error - Log a sis64 device error.
2373 * @ioa_cfg: ioa config struct
2374 * @hostrcb: hostrcb struct
2375 *
2376 * Return value:
2377 * none
2378 **/
2379static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2380 struct ipr_hostrcb *hostrcb)
2381{
2382 struct ipr_hostrcb_type_21_error *error;
2383 char buffer[IPR_MAX_RES_PATH_LENGTH];
2384
2385 error = &hostrcb->hcam.u.error64.u.type_21_error;
2386
2387 ipr_err("-----Failing Device Information-----\n");
2388 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2389 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2390 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2391 ipr_err("Device Resource Path: %s\n",
2392 __ipr_format_res_path(error->res_path,
2393 buffer, sizeof(buffer)));
2394 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2395 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2396 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2397 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2398 ipr_err("SCSI Sense Data:\n");
2399 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2400 ipr_err("SCSI Command Descriptor Block: \n");
2401 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2402
2403 ipr_err("Additional IOA Data:\n");
2404 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2405}
2406
2407/**
2408 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2409 * @ioasc: IOASC
2410 *
2411 * This function will return the index into the ipr_error_table
2412 * for the specified IOASC. If the IOASC is not in the table,
2413 * 0 will be returned, which points to the entry used for unknown errors.
2414 *
2415 * Return value:
2416 * index into the ipr_error_table
2417 **/
2418static u32 ipr_get_error(u32 ioasc)
2419{
2420 int i;
2421
2422 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2423 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2424 return i;
2425
2426 return 0;
2427}
2428
2429/**
2430 * ipr_handle_log_data - Log an adapter error.
2431 * @ioa_cfg: ioa config struct
2432 * @hostrcb: hostrcb struct
2433 *
2434 * This function logs an adapter error to the system.
2435 *
2436 * Return value:
2437 * none
2438 **/
2439static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2440 struct ipr_hostrcb *hostrcb)
2441{
2442 u32 ioasc;
2443 int error_index;
3185ea63 2444 struct ipr_hostrcb_type_21_error *error;
2445
2446 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2447 return;
2448
2449 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2450 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2451
2452 if (ioa_cfg->sis64)
2453 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2454 else
2455 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2456
2457 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2458 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2459 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2460 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2461 hostrcb->hcam.u.error.fd_res_addr.bus);
2462 }
2463
2464 error_index = ipr_get_error(ioasc);
2465
2466 if (!ipr_error_table[error_index].log_hcam)
2467 return;
2468
3185ea63 2469 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2470 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2471 error = &hostrcb->hcam.u.error64.u.type_21_error;
2472
2473 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2474 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2475 return;
2476 }
2477
49dc6a18 2478 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2479
2480 /* Set indication we have logged an error */
2481 ioa_cfg->errors_logged++;
2482
933916f3 2483 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2484 return;
2485 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2486 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2487
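	/* Dispatch on the overlay ID to the decoder for that error log
	 * format; anything unrecognized falls through to a raw hex dump
	 * via ipr_log_generic_error(). */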
2488 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2489 case IPR_HOST_RCB_OVERLAY_ID_2:
2490 ipr_log_cache_error(ioa_cfg, hostrcb);
2491 break;
2492 case IPR_HOST_RCB_OVERLAY_ID_3:
2493 ipr_log_config_error(ioa_cfg, hostrcb);
2494 break;
2495 case IPR_HOST_RCB_OVERLAY_ID_4:
2496 case IPR_HOST_RCB_OVERLAY_ID_6:
2497 ipr_log_array_error(ioa_cfg, hostrcb);
2498 break;
2499 case IPR_HOST_RCB_OVERLAY_ID_7:
2500 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2501 break;
2502 case IPR_HOST_RCB_OVERLAY_ID_12:
2503 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2504 break;
2505 case IPR_HOST_RCB_OVERLAY_ID_13:
2506 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2507 break;
2508 case IPR_HOST_RCB_OVERLAY_ID_14:
2509 case IPR_HOST_RCB_OVERLAY_ID_16:
2510 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2511 break;
2512 case IPR_HOST_RCB_OVERLAY_ID_17:
2513 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2514 break;
2515 case IPR_HOST_RCB_OVERLAY_ID_20:
2516 ipr_log_fabric_error(ioa_cfg, hostrcb);
2517 break;
2518 case IPR_HOST_RCB_OVERLAY_ID_21:
2519 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2520 break;
2521 case IPR_HOST_RCB_OVERLAY_ID_23:
2522 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2523 break;
2524 case IPR_HOST_RCB_OVERLAY_ID_24:
2525 case IPR_HOST_RCB_OVERLAY_ID_26:
2526 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2527 break;
2528 case IPR_HOST_RCB_OVERLAY_ID_30:
2529 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2530 break;
cf852037 2531 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2532 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2533 default:
a9cfca96 2534 ipr_log_generic_error(ioa_cfg, hostrcb);
2535 break;
2536 }
2537}
2538
2539/**
2540 * ipr_process_error - Op done function for an adapter error log.
2541 * @ipr_cmd: ipr command struct
2542 *
2543 * This function is the op done function for an error log HCAM (host
2544 * controlled asynchronous message) from the adapter. It will log the
2545 * error and send the HCAM back to the adapter.
2546 *
2547 * Return value:
2548 * none
2549 **/
2550static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2551{
2552 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2553 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2554 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2555 u32 fd_ioasc;
2556
2557 if (ioa_cfg->sis64)
2558 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2559 else
2560 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2561
2562 list_del(&hostrcb->queue);
05a6538a 2563 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2564
2565 if (!ioasc) {
2566 ipr_handle_log_data(ioa_cfg, hostrcb);
2567 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2568 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2569 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2570 dev_err(&ioa_cfg->pdev->dev,
2571 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2572 }
2573
2574 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2575}
2576
2577/**
2578 * ipr_timeout - An internally generated op has timed out.
2579 * @ipr_cmd: ipr command struct
2580 *
2581 * This function blocks host requests and initiates an
2582 * adapter reset.
2583 *
2584 * Return value:
2585 * none
2586 **/
2587static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2588{
2589 unsigned long lock_flags = 0;
2590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2591
2592 ENTER;
2593 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2594
2595 ioa_cfg->errors_logged++;
2596 dev_err(&ioa_cfg->pdev->dev,
2597 "Adapter being reset due to command timeout.\n");
2598
2599 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2600 ioa_cfg->sdt_state = GET_DUMP;
2601
2602 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2603 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2604
2605 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2606 LEAVE;
2607}
2608
2609/**
2610 * ipr_oper_timeout - Adapter timed out transitioning to operational
2611 * @ipr_cmd: ipr command struct
2612 *
2613 * This function blocks host requests and initiates an
2614 * adapter reset.
2615 *
2616 * Return value:
2617 * none
2618 **/
2619static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2620{
2621 unsigned long lock_flags = 0;
2622 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2623
2624 ENTER;
2625 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2626
2627 ioa_cfg->errors_logged++;
2628 dev_err(&ioa_cfg->pdev->dev,
2629 "Adapter timed out transitioning to operational.\n");
2630
2631 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2632 ioa_cfg->sdt_state = GET_DUMP;
2633
2634 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2635 if (ipr_fastfail)
2636 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2637 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2638 }
2639
2640 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2641 LEAVE;
2642}
2643
2644/**
2645 * ipr_find_ses_entry - Find matching SES in SES table
2646 * @res: resource entry struct of SES
2647 *
2648 * Return value:
2649 * pointer to SES table entry / NULL on failure
2650 **/
2651static const struct ipr_ses_table_entry *
2652ipr_find_ses_entry(struct ipr_resource_entry *res)
2653{
2654 int i, j, matches;
3e7ebdfa 2655 struct ipr_std_inq_vpids *vpids;
2656 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2657
2658 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2659 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2660 if (ste->compare_product_id_byte[j] == 'X') {
2661 vpids = &res->std_inq_data.vpids;
2662 if (vpids->product_id[j] == ste->product_id[j])
2663 matches++;
2664 else
2665 break;
2666 } else
2667 matches++;
2668 }
2669
2670 if (matches == IPR_PROD_ID_LEN)
2671 return ste;
2672 }
2673
2674 return NULL;
2675}
2676
2677/**
2678 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2679 * @ioa_cfg: ioa config struct
2680 * @bus: SCSI bus
2681 * @bus_width: bus width
2682 *
2683 * Return value:
2684 * SCSI bus speed in units of 100KHz (e.g. 1600 == 160 MHz).
2685 * For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
2686 * twice the clock rate (e.g. a wide-enabled bus clocked at a maximum
2687 * of 160 MHz can transfer at most 320 MB/sec).
2688 **/
2689static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2690{
2691 struct ipr_resource_entry *res;
2692 const struct ipr_ses_table_entry *ste;
2693 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2694
2695 /* Loop through each config table entry in the config table buffer */
2696 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2697 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2698 continue;
2699
3e7ebdfa 2700 if (bus != res->bus)
2701 continue;
2702
2703 if (!(ste = ipr_find_ses_entry(res)))
2704 continue;
2705
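		/*
		 * Convert the SES table limit into the 100KHz units used by
		 * the caller: assuming max_bus_speed_limit is in MB/sec, a
		 * 160 MB/sec cap on a 2-byte wide bus gives 160 * 10 / 2 =
		 * 800, i.e. 80 MHz.
		 */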
2706 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2707 }
2708
2709 return max_xfer_rate;
2710}
2711
2712/**
2713 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2714 * @ioa_cfg: ioa config struct
2715 * @max_delay: max delay in micro-seconds to wait
2716 *
2717 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2718 *
2719 * Return value:
2720 * 0 on success / other on failure
2721 **/
2722static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2723{
2724 volatile u32 pcii_reg;
2725 int delay = 1;
2726
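	/*
	 * Busy-poll with an exponentially growing delay (1, 2, 4, ...
	 * microseconds, since delay doubles each pass) until the IOA raises
	 * IO_DEBUG_ACKNOWLEDGE or max_delay is exceeded; longer waits use
	 * mdelay() because udelay() is only safe for sub-millisecond spins.
	 */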
2727 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2728 while (delay < max_delay) {
2729 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2730
2731 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2732 return 0;
2733
2734 /* udelay cannot be used if delay is more than a few milliseconds */
2735 if ((delay / 1000) > MAX_UDELAY_MS)
2736 mdelay(delay / 1000);
2737 else
2738 udelay(delay);
2739
2740 delay += delay;
2741 }
2742 return -EIO;
2743}
2744
2745/**
2746 * ipr_get_sis64_dump_data_section - Dump IOA memory
2747 * @ioa_cfg: ioa config struct
2748 * @start_addr: adapter address to dump
2749 * @dest: destination kernel buffer
2750 * @length_in_words: length to dump in 4 byte words
2751 *
2752 * Return value:
2753 * 0 on success
2754 **/
2755static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2756 u32 start_addr,
2757 __be32 *dest, u32 length_in_words)
2758{
2759 int i;
2760
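	/*
	 * SIS64 adapters expose dump memory through an indirect register
	 * window: write the adapter address to dump_addr_reg, then read the
	 * corresponding word back from dump_data_reg, one word at a time.
	 */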
2761 for (i = 0; i < length_in_words; i++) {
2762 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2763 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2764 dest++;
2765 }
2766
2767 return 0;
2768}
2769
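/*
 * Older (non-SIS64) adapters instead use a mailbox handshake for long
 * dumps: the routine below raises RESET_ALERT and IO_DEBUG_ALERT, waits
 * for an IO debug acknowledge, writes the start address to the mailbox,
 * then reads one word per acknowledge, acking each word until the block
 * is complete.
 */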
2770/**
2771 * ipr_get_ldump_data_section - Dump IOA memory
2772 * @ioa_cfg: ioa config struct
2773 * @start_addr: adapter address to dump
2774 * @dest: destination kernel buffer
2775 * @length_in_words: length to dump in 4 byte words
2776 *
2777 * Return value:
2778 * 0 on success / -EIO on failure
2779 **/
2780static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2781 u32 start_addr,
2782 __be32 *dest, u32 length_in_words)
2783{
2784 volatile u32 temp_pcii_reg;
2785 int i, delay = 0;
2786
2787 if (ioa_cfg->sis64)
2788 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2789 dest, length_in_words);
2790
2791 /* Write IOA interrupt reg starting LDUMP state */
2792 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2793 ioa_cfg->regs.set_uproc_interrupt_reg32);
2794
2795 /* Wait for IO debug acknowledge */
2796 if (ipr_wait_iodbg_ack(ioa_cfg,
2797 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2798 dev_err(&ioa_cfg->pdev->dev,
2799 "IOA dump long data transfer timeout\n");
2800 return -EIO;
2801 }
2802
2803 /* Signal LDUMP interlocked - clear IO debug ack */
2804 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2805 ioa_cfg->regs.clr_interrupt_reg);
2806
2807 /* Write Mailbox with starting address */
2808 writel(start_addr, ioa_cfg->ioa_mailbox);
2809
2810 /* Signal address valid - clear IOA Reset alert */
2811 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2812 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2813
2814 for (i = 0; i < length_in_words; i++) {
2815 /* Wait for IO debug acknowledge */
2816 if (ipr_wait_iodbg_ack(ioa_cfg,
2817 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2818 dev_err(&ioa_cfg->pdev->dev,
2819 "IOA dump short data transfer timeout\n");
2820 return -EIO;
2821 }
2822
2823 /* Read data from mailbox and increment destination pointer */
2824 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2825 dest++;
2826
2827 /* For all but the last word of data, signal data received */
2828 if (i < (length_in_words - 1)) {
2829 /* Signal dump data received - Clear IO debug Ack */
2830 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2831 ioa_cfg->regs.clr_interrupt_reg);
2832 }
2833 }
2834
2835 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2836 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2837 ioa_cfg->regs.set_uproc_interrupt_reg32);
2838
2839 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2840 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2841
2842 /* Signal dump data received - Clear IO debug Ack */
2843 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2844 ioa_cfg->regs.clr_interrupt_reg);
2845
2846 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2847 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2848 temp_pcii_reg =
214777ba 2849 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2850
2851 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2852 return 0;
2853
2854 udelay(10);
2855 delay += 10;
2856 }
2857
2858 return 0;
2859}
2860
2861#ifdef CONFIG_SCSI_IPR_DUMP
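/*
 * The IOA dump is staged into individually allocated pages so large dumps
 * never need a high-order allocation; ipr_sdt_copy() below fills the
 * current page, grabs a fresh GFP_ATOMIC page when it runs out, and
 * re-takes the host lock around each ipr_get_ldump_data_section() call.
 */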
2862/**
2863 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2864 * @ioa_cfg: ioa config struct
2865 * @pci_address: adapter address
2866 * @length: length of data to copy
2867 *
2868 * Copy data from PCI adapter to kernel buffer.
2869 * Note: length MUST be a 4 byte multiple
2870 * Return value:
2871 * 0 on success / other on failure
2872 **/
2873static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2874 unsigned long pci_address, u32 length)
2875{
2876 int bytes_copied = 0;
4d4dd706 2877 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2878 __be32 *page;
2879 unsigned long lock_flags = 0;
2880 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2881
2882 if (ioa_cfg->sis64)
2883 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2884 else
2885 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2886
1da177e4 2887 while (bytes_copied < length &&
4d4dd706 2888 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2889 if (ioa_dump->page_offset >= PAGE_SIZE ||
2890 ioa_dump->page_offset == 0) {
2891 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2892
2893 if (!page) {
2894 ipr_trace;
2895 return bytes_copied;
2896 }
2897
2898 ioa_dump->page_offset = 0;
2899 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2900 ioa_dump->next_page_index++;
2901 } else
2902 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2903
2904 rem_len = length - bytes_copied;
2905 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2906 cur_len = min(rem_len, rem_page_len);
2907
2908 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2909 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2910 rc = -EIO;
2911 } else {
2912 rc = ipr_get_ldump_data_section(ioa_cfg,
2913 pci_address + bytes_copied,
2914 &page[ioa_dump->page_offset / 4],
2915 (cur_len / sizeof(u32)));
2916 }
2917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2918
2919 if (!rc) {
2920 ioa_dump->page_offset += cur_len;
2921 bytes_copied += cur_len;
2922 } else {
2923 ipr_trace;
2924 break;
2925 }
2926 schedule();
2927 }
2928
2929 return bytes_copied;
2930}
2931
2932/**
2933 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2934 * @hdr: dump entry header struct
2935 *
2936 * Return value:
2937 * nothing
2938 **/
2939static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2940{
2941 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2942 hdr->num_elems = 1;
2943 hdr->offset = sizeof(*hdr);
2944 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2945}
2946
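/*
 * Each ipr_dump_*_data() helper below fills in one fixed entry of the
 * driver portion of the dump (adapter type plus microcode level, driver
 * version, trace buffer, and PCI location) and bumps the dump header's
 * entry count.
 */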
2947/**
2948 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2949 * @ioa_cfg: ioa config struct
2950 * @driver_dump: driver dump struct
2951 *
2952 * Return value:
2953 * nothing
2954 **/
2955static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2956 struct ipr_driver_dump *driver_dump)
2957{
2958 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2959
2960 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2961 driver_dump->ioa_type_entry.hdr.len =
2962 sizeof(struct ipr_dump_ioa_type_entry) -
2963 sizeof(struct ipr_dump_entry_header);
2964 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2965 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2966 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2967 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2968 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2969 ucode_vpd->minor_release[1];
2970 driver_dump->hdr.num_entries++;
2971}
2972
2973/**
2974 * ipr_dump_version_data - Fill in the driver version in the dump.
2975 * @ioa_cfg: ioa config struct
2976 * @driver_dump: driver dump struct
2977 *
2978 * Return value:
2979 * nothing
2980 **/
2981static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2982 struct ipr_driver_dump *driver_dump)
2983{
2984 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2985 driver_dump->version_entry.hdr.len =
2986 sizeof(struct ipr_dump_version_entry) -
2987 sizeof(struct ipr_dump_entry_header);
2988 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2989 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2990 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2991 driver_dump->hdr.num_entries++;
2992}
2993
2994/**
2995 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2996 * @ioa_cfg: ioa config struct
2997 * @driver_dump: driver dump struct
2998 *
2999 * Return value:
3000 * nothing
3001 **/
3002static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3003 struct ipr_driver_dump *driver_dump)
3004{
3005 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3006 driver_dump->trace_entry.hdr.len =
3007 sizeof(struct ipr_dump_trace_entry) -
3008 sizeof(struct ipr_dump_entry_header);
3009 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3010 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3011 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3012 driver_dump->hdr.num_entries++;
3013}
3014
3015/**
3016 * ipr_dump_location_data - Fill in the IOA location in the dump.
3017 * @ioa_cfg: ioa config struct
3018 * @driver_dump: driver dump struct
3019 *
3020 * Return value:
3021 * nothing
3022 **/
3023static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3024 struct ipr_driver_dump *driver_dump)
3025{
3026 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3027 driver_dump->location_entry.hdr.len =
3028 sizeof(struct ipr_dump_location_entry) -
3029 sizeof(struct ipr_dump_entry_header);
3030 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3031 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 3032 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
3033 driver_dump->hdr.num_entries++;
3034}
3035
3036/**
3037 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3038 * @ioa_cfg: ioa config struct
3039 * @dump: dump struct
3040 *
3041 * Return value:
3042 * nothing
3043 **/
3044static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3045{
3046 unsigned long start_addr, sdt_word;
3047 unsigned long lock_flags = 0;
3048 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3049 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3050 u32 num_entries, max_num_entries, start_off, end_off;
3051 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 3052 struct ipr_sdt *sdt;
dcbad00e 3053 int valid = 1;
3054 int i;
3055
3056 ENTER;
3057
3058 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3059
41e9a696 3060 if (ioa_cfg->sdt_state != READ_DUMP) {
3061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3062 return;
3063 }
3064
3065 if (ioa_cfg->sis64) {
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067 ssleep(IPR_DUMP_DELAY_SECONDS);
3068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3069 }
3070
3071 start_addr = readl(ioa_cfg->ioa_mailbox);
3072
dcbad00e 3073 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3074 dev_err(&ioa_cfg->pdev->dev,
3075 "Invalid dump table format: %lx\n", start_addr);
3076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3077 return;
3078 }
3079
3080 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3081
3082 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3083
3084 /* Initialize the overall dump header */
3085 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3086 driver_dump->hdr.num_entries = 1;
3087 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3088 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3089 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3090 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3091
3092 ipr_dump_version_data(ioa_cfg, driver_dump);
3093 ipr_dump_location_data(ioa_cfg, driver_dump);
3094 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3095 ipr_dump_trace_data(ioa_cfg, driver_dump);
3096
3097 /* Update dump_header */
3098 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3099
3100 /* IOA Dump entry */
3101 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3102 ioa_dump->hdr.len = 0;
3103 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3104 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3105
3106 /* First entries in sdt are actually a list of dump addresses and
3107 lengths to gather the real dump data. sdt represents the pointer
3108 to the ioa generated dump table. Dump data will be extracted based
3109 on entries in this table */
3110 sdt = &ioa_dump->sdt;
3111
3112 if (ioa_cfg->sis64) {
3113 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3114 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3115 } else {
3116 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3117 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3118 }
3119
3120 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3121 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 3122 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 3123 bytes_to_copy / sizeof(__be32));
3124
3125 /* Smart Dump table is ready to use and the first entry is valid */
3126 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3127 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3128 dev_err(&ioa_cfg->pdev->dev,
3129 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3130 rc, be32_to_cpu(sdt->hdr.state));
3131 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3132 ioa_cfg->sdt_state = DUMP_OBTAINED;
3133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3134 return;
3135 }
3136
3137 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3138
3139 if (num_entries > max_num_entries)
3140 num_entries = max_num_entries;
3141
3142 /* Update dump length to the actual data to be copied */
3143 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3144 if (ioa_cfg->sis64)
3145 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3146 else
3147 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3148
3149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150
3151 for (i = 0; i < num_entries; i++) {
4d4dd706 3152 if (ioa_dump->hdr.len > max_dump_size) {
3153 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3154 break;
3155 }
3156
3157 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3158 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3159 if (ioa_cfg->sis64)
3160 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3161 else {
3162 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3163 end_off = be32_to_cpu(sdt->entry[i].end_token);
3164
3165 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3166 bytes_to_copy = end_off - start_off;
3167 else
3168 valid = 0;
3169 }
3170 if (valid) {
4d4dd706 3171 if (bytes_to_copy > max_dump_size) {
3172 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3173 continue;
3174 }
3175
3176 /* Copy data from adapter to driver buffers */
3177 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3178 bytes_to_copy);
3179
3180 ioa_dump->hdr.len += bytes_copied;
3181
3182 if (bytes_copied != bytes_to_copy) {
3183 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3184 break;
3185 }
3186 }
3187 }
3188 }
3189
3190 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3191
3192 /* Update dump_header */
3193 driver_dump->hdr.len += ioa_dump->hdr.len;
3194 wmb();
3195 ioa_cfg->sdt_state = DUMP_OBTAINED;
3196 LEAVE;
3197}
3198
3199#else
203fa3fe 3200#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3201#endif
3202
3203/**
3204 * ipr_release_dump - Free adapter dump memory
3205 * @kref: kref struct
3206 *
3207 * Return value:
3208 * nothing
3209 **/
3210static void ipr_release_dump(struct kref *kref)
3211{
203fa3fe 3212 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3213 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3214 unsigned long lock_flags = 0;
3215 int i;
3216
3217 ENTER;
3218 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3219 ioa_cfg->dump = NULL;
3220 ioa_cfg->sdt_state = INACTIVE;
3221 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3222
3223 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3224 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3225
4d4dd706 3226 vfree(dump->ioa_dump.ioa_data);
3227 kfree(dump);
3228 LEAVE;
3229}
3230
3231/**
3232 * ipr_worker_thread - Worker thread
c4028958 3233 * @work: ioa config struct
3234 *
3235 * Called at task level from a work thread. This function takes care
3236 * of adding and removing device from the mid-layer as configuration
3237 * changes are detected by the adapter.
3238 *
3239 * Return value:
3240 * nothing
3241 **/
c4028958 3242static void ipr_worker_thread(struct work_struct *work)
3243{
3244 unsigned long lock_flags;
3245 struct ipr_resource_entry *res;
3246 struct scsi_device *sdev;
3247 struct ipr_dump *dump;
3248 struct ipr_ioa_cfg *ioa_cfg =
3249 container_of(work, struct ipr_ioa_cfg, work_q);
3250 u8 bus, target, lun;
3251 int did_work;
3252
3253 ENTER;
3254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3255
41e9a696 3256 if (ioa_cfg->sdt_state == READ_DUMP) {
3257 dump = ioa_cfg->dump;
3258 if (!dump) {
3259 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3260 return;
3261 }
3262 kref_get(&dump->kref);
3263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3264 ipr_get_ioa_dump(ioa_cfg, dump);
3265 kref_put(&dump->kref, ipr_release_dump);
3266
3267 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3268 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3269 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3271 return;
3272 }
3273
3274restart:
3275 do {
3276 did_work = 0;
f688f96d 3277 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279 return;
3280 }
3281
3282 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3283 if (res->del_from_ml && res->sdev) {
3284 did_work = 1;
3285 sdev = res->sdev;
3286 if (!scsi_device_get(sdev)) {
3287 if (!res->add_to_ml)
3288 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3289 else
3290 res->del_from_ml = 0;
3291 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292 scsi_remove_device(sdev);
3293 scsi_device_put(sdev);
3294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295 }
3296 break;
3297 }
3298 }
203fa3fe 3299 } while (did_work);
3300
3301 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3302 if (res->add_to_ml) {
3303 bus = res->bus;
3304 target = res->target;
3305 lun = res->lun;
1121b794 3306 res->add_to_ml = 0;
3307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308 scsi_add_device(ioa_cfg->host, bus, target, lun);
3309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310 goto restart;
3311 }
3312 }
3313
f688f96d 3314 ioa_cfg->scan_done = 1;
1da177e4 3315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3316 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3317 LEAVE;
3318}
3319
3320#ifdef CONFIG_SCSI_IPR_TRACE
3321/**
3322 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3323 * @filp: open sysfs file
1da177e4 3324 * @kobj: kobject struct
91a69029 3325 * @bin_attr: bin_attribute struct
3326 * @buf: buffer
3327 * @off: offset
3328 * @count: buffer size
3329 *
3330 * Return value:
3331 * number of bytes printed to buffer
3332 **/
2c3c8bea 3333static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3334 struct bin_attribute *bin_attr,
3335 char *buf, loff_t off, size_t count)
1da177e4 3336{
3337 struct device *dev = container_of(kobj, struct device, kobj);
3338 struct Scsi_Host *shost = class_to_shost(dev);
3339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3340 unsigned long lock_flags = 0;
d777aaf3 3341 ssize_t ret;
3342
3343 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3344 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3345 IPR_TRACE_SIZE);
1da177e4 3346 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347
3348 return ret;
3349}
3350
3351static struct bin_attribute ipr_trace_attr = {
3352 .attr = {
3353 .name = "trace",
3354 .mode = S_IRUGO,
3355 },
3356 .size = 0,
3357 .read = ipr_read_trace,
3358};
3359#endif
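/*
 * Illustrative userspace sketch (not part of the driver): reading the
 * binary "trace" attribute registered above.  The attribute is created on
 * the Scsi_Host class device, so the sysfs path and host number used below
 * are assumptions made for the example.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/trace", "rb");	/* hypothetical host0 */
	char buf[4096];
	size_t n;

	if (!f) {
		perror("trace");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* at most IPR_TRACE_SIZE bytes of raw trace */
	fclose(f);
	return 0;
}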
3360
3361/**
3362 * ipr_show_fw_version - Show the firmware version
3363 * @dev: class device struct
3364 * @buf: buffer
3365 *
3366 * Return value:
3367 * number of bytes printed to buffer
3368 **/
3369static ssize_t ipr_show_fw_version(struct device *dev,
3370 struct device_attribute *attr, char *buf)
1da177e4 3371{
ee959b00 3372 struct Scsi_Host *shost = class_to_shost(dev);
3373 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3374 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3375 unsigned long lock_flags = 0;
3376 int len;
3377
3378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3379 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3380 ucode_vpd->major_release, ucode_vpd->card_type,
3381 ucode_vpd->minor_release[0],
3382 ucode_vpd->minor_release[1]);
3383 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3384 return len;
3385}
3386
ee959b00 3387static struct device_attribute ipr_fw_version_attr = {
3388 .attr = {
3389 .name = "fw_version",
3390 .mode = S_IRUGO,
3391 },
3392 .show = ipr_show_fw_version,
3393};
3394
3395/**
3396 * ipr_show_log_level - Show the adapter's error logging level
3397 * @dev: class device struct
3398 * @buf: buffer
3399 *
3400 * Return value:
3401 * number of bytes printed to buffer
3402 **/
3403static ssize_t ipr_show_log_level(struct device *dev,
3404 struct device_attribute *attr, char *buf)
1da177e4 3405{
ee959b00 3406 struct Scsi_Host *shost = class_to_shost(dev);
3407 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3408 unsigned long lock_flags = 0;
3409 int len;
3410
3411 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3412 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3414 return len;
3415}
3416
3417/**
3418 * ipr_store_log_level - Change the adapter's error logging level
3419 * @dev: class device struct
3420 * @buf: buffer
3421 *
3422 * Return value:
3423 * length of buffer consumed
3424 **/
ee959b00 3425static ssize_t ipr_store_log_level(struct device *dev,
203fa3fe 3426 struct device_attribute *attr,
3427 const char *buf, size_t count)
3428{
ee959b00 3429 struct Scsi_Host *shost = class_to_shost(dev);
3430 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3431 unsigned long lock_flags = 0;
3432
3433 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3434 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3436 return strlen(buf);
3437}
3438
ee959b00 3439static struct device_attribute ipr_log_level_attr = {
3440 .attr = {
3441 .name = "log_level",
3442 .mode = S_IRUGO | S_IWUSR,
3443 },
3444 .show = ipr_show_log_level,
3445 .store = ipr_store_log_level
3446};
3447
3448/**
3449 * ipr_store_diagnostics - IOA Diagnostics interface
3450 * @dev: device struct
3451 * @buf: buffer
3452 * @count: buffer size
3453 *
3454 * This function will reset the adapter and wait a reasonable
3455 * amount of time for any errors that the adapter might log.
3456 *
3457 * Return value:
3458 * count on success / other on failure
3459 **/
3460static ssize_t ipr_store_diagnostics(struct device *dev,
3461 struct device_attribute *attr,
3462 const char *buf, size_t count)
3463{
ee959b00 3464 struct Scsi_Host *shost = class_to_shost(dev);
3465 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3466 unsigned long lock_flags = 0;
3467 int rc = count;
3468
3469 if (!capable(CAP_SYS_ADMIN))
3470 return -EACCES;
3471
1da177e4 3472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3473 while (ioa_cfg->in_reset_reload) {
3474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3475 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3476 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3477 }
3478
3479 ioa_cfg->errors_logged = 0;
3480 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3481
3482 if (ioa_cfg->in_reset_reload) {
3483 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3485
3486 /* Wait for a second for any errors to be logged */
3487 msleep(1000);
3488 } else {
3489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3490 return -EIO;
3491 }
3492
3493 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3494 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3495 rc = -EIO;
3496 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3497
3498 return rc;
3499}
3500
ee959b00 3501static struct device_attribute ipr_diagnostics_attr = {
3502 .attr = {
3503 .name = "run_diagnostics",
3504 .mode = S_IWUSR,
3505 },
3506 .store = ipr_store_diagnostics
3507};
3508
3509/**
3510 * ipr_show_adapter_state - Show the adapter's state
3511 * @dev: device struct
3512 * @buf: buffer
3513 *
3514 * Return value:
3515 * number of bytes printed to buffer
3516 **/
3517static ssize_t ipr_show_adapter_state(struct device *dev,
3518 struct device_attribute *attr, char *buf)
f37eb54b 3519{
ee959b00 3520 struct Scsi_Host *shost = class_to_shost(dev);
3521 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3522 unsigned long lock_flags = 0;
3523 int len;
3524
3525 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3526 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3527 len = snprintf(buf, PAGE_SIZE, "offline\n");
3528 else
3529 len = snprintf(buf, PAGE_SIZE, "online\n");
3530 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3531 return len;
3532}
3533
3534/**
3535 * ipr_store_adapter_state - Change adapter state
3536 * @dev: device struct
3537 * @buf: buffer
3538 * @count: buffer size
3539 *
3540 * This function will change the adapter's state.
3541 *
3542 * Return value:
3543 * count on success / other on failure
3544 **/
3545static ssize_t ipr_store_adapter_state(struct device *dev,
3546 struct device_attribute *attr,
3547 const char *buf, size_t count)
3548{
ee959b00 3549 struct Scsi_Host *shost = class_to_shost(dev);
3550 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3551 unsigned long lock_flags;
56d6aa33 3552 int result = count, i;
3553
3554 if (!capable(CAP_SYS_ADMIN))
3555 return -EACCES;
3556
3557 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3558 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3559 !strncmp(buf, "online", 6)) {
3560 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3561 spin_lock(&ioa_cfg->hrrq[i]._lock);
3562 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3563 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3564 }
3565 wmb();
3566 ioa_cfg->reset_retries = 0;
3567 ioa_cfg->in_ioa_bringdown = 0;
3568 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3569 }
3570 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3571 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3572
3573 return result;
3574}
3575
ee959b00 3576static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3577 .attr = {
49dd0961 3578 .name = "online_state",
3579 .mode = S_IRUGO | S_IWUSR,
3580 },
3581 .show = ipr_show_adapter_state,
3582 .store = ipr_store_adapter_state
3583};
3584
3585/**
3586 * ipr_store_reset_adapter - Reset the adapter
3587 * @dev: device struct
3588 * @buf: buffer
3589 * @count: buffer size
3590 *
3591 * This function will reset the adapter.
3592 *
3593 * Return value:
3594 * count on success / other on failure
3595 **/
3596static ssize_t ipr_store_reset_adapter(struct device *dev,
3597 struct device_attribute *attr,
3598 const char *buf, size_t count)
3599{
ee959b00 3600 struct Scsi_Host *shost = class_to_shost(dev);
3601 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3602 unsigned long lock_flags;
3603 int result = count;
3604
3605 if (!capable(CAP_SYS_ADMIN))
3606 return -EACCES;
3607
3608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3609 if (!ioa_cfg->in_reset_reload)
3610 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3613
3614 return result;
3615}
3616
ee959b00 3617static struct device_attribute ipr_ioa_reset_attr = {
3618 .attr = {
3619 .name = "reset_host",
3620 .mode = S_IWUSR,
3621 },
3622 .store = ipr_store_reset_adapter
3623};
3624
b53d124a 3625static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3626 /**
3627 * ipr_show_iopoll_weight - Show ipr polling mode
3628 * @dev: class device struct
3629 * @buf: buffer
3630 *
3631 * Return value:
3632 * number of bytes printed to buffer
3633 **/
3634static ssize_t ipr_show_iopoll_weight(struct device *dev,
3635 struct device_attribute *attr, char *buf)
3636{
3637 struct Scsi_Host *shost = class_to_shost(dev);
3638 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3639 unsigned long lock_flags = 0;
3640 int len;
3641
3642 spin_lock_irqsave(shost->host_lock, lock_flags);
3643 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3644 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3645
3646 return len;
3647}
3648
3649/**
3650 * ipr_store_iopoll_weight - Change the adapter's polling mode
3651 * @dev: class device struct
3652 * @buf: buffer
3653 *
3654 * Return value:
3655 * length of buffer consumed
3656 **/
3657static ssize_t ipr_store_iopoll_weight(struct device *dev,
3658 struct device_attribute *attr,
3659 const char *buf, size_t count)
3660{
3661 struct Scsi_Host *shost = class_to_shost(dev);
3662 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3663 unsigned long user_iopoll_weight;
3664 unsigned long lock_flags = 0;
3665 int i;
3666
3667 if (!ioa_cfg->sis64) {
3668 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3669 return -EINVAL;
3670 }
3671 if (kstrtoul(buf, 10, &user_iopoll_weight))
3672 return -EINVAL;
3673
3674 if (user_iopoll_weight > 256) {
3675 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3676 return -EINVAL;
3677 }
3678
3679 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3680 dev_info(&ioa_cfg->pdev->dev, "Requested blk-iopoll weight matches the current weight\n");
3681 return strlen(buf);
3682 }
3683
89f8b33c 3684 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3685 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3686 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3687 }
3688
3689 spin_lock_irqsave(shost->host_lock, lock_flags);
3690 ioa_cfg->iopoll_weight = user_iopoll_weight;
89f8b33c 3691 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3692 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3693 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3694 ioa_cfg->iopoll_weight, ipr_iopoll);
3695 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3696 }
3697 }
3698 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3699
3700 return strlen(buf);
3701}
3702
3703static struct device_attribute ipr_iopoll_weight_attr = {
3704 .attr = {
3705 .name = "iopoll_weight",
3706 .mode = S_IRUGO | S_IWUSR,
3707 },
3708 .show = ipr_show_iopoll_weight,
3709 .store = ipr_store_iopoll_weight
3710};
3711
3712/**
3713 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3714 * @buf_len: buffer length
3715 *
3716 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3717 * list to use for microcode download
3718 *
3719 * Return value:
3720 * pointer to sglist / NULL on failure
3721 **/
3722static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3723{
3724 int sg_size, order, bsize_elem, num_elem, i, j;
3725 struct ipr_sglist *sglist;
3726 struct scatterlist *scatterlist;
3727 struct page *page;
3728
3729 /* Get the minimum size per scatter/gather element */
3730 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3731
3732 /* Get the actual size per element */
3733 order = get_order(sg_size);
3734
3735 /* Determine the actual number of bytes per element */
3736 bsize_elem = PAGE_SIZE * (1 << order);
3737
3738 /* Determine the actual number of sg entries needed */
3739 if (buf_len % bsize_elem)
3740 num_elem = (buf_len / bsize_elem) + 1;
3741 else
3742 num_elem = buf_len / bsize_elem;
3743
3744 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3745 sglist = kzalloc(sizeof(struct ipr_sglist) +
3746 (sizeof(struct scatterlist) * (num_elem - 1)),
3747 GFP_KERNEL);
3748
3749 if (sglist == NULL) {
3750 ipr_trace;
3751 return NULL;
3752 }
3753
1da177e4 3754 scatterlist = sglist->scatterlist;
45711f1a 3755 sg_init_table(scatterlist, num_elem);
3756
3757 sglist->order = order;
3758 sglist->num_sg = num_elem;
3759
3760 /* Allocate a bunch of sg elements */
3761 for (i = 0; i < num_elem; i++) {
3762 page = alloc_pages(GFP_KERNEL, order);
3763 if (!page) {
3764 ipr_trace;
3765
3766 /* Free up what we already allocated */
3767 for (j = i - 1; j >= 0; j--)
45711f1a 3768 __free_pages(sg_page(&scatterlist[j]), order);
3769 kfree(sglist);
3770 return NULL;
3771 }
3772
642f1490 3773 sg_set_page(&scatterlist[i], page, 0, 0);
3774 }
3775
3776 return sglist;
3777}
3778
3779/**
3780 * ipr_free_ucode_buffer - Frees a microcode download buffer
3781 * @p_dnld: scatter/gather list pointer
3782 *
3783 * Free a DMA'able ucode download buffer previously allocated with
3784 * ipr_alloc_ucode_buffer
3785 *
3786 * Return value:
3787 * nothing
3788 **/
3789static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3790{
3791 int i;
3792
3793 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3794 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3795
3796 kfree(sglist);
3797}
3798
3799/**
3800 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3801 * @sglist: scatter/gather list pointer
3802 * @buffer: buffer pointer
3803 * @len: buffer length
3804 *
3805 * Copy a microcode image from a user buffer into a buffer allocated by
3806 * ipr_alloc_ucode_buffer
3807 *
3808 * Return value:
3809 * 0 on success / other on failure
3810 **/
3811static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3812 u8 *buffer, u32 len)
3813{
3814 int bsize_elem, i, result = 0;
3815 struct scatterlist *scatterlist;
3816 void *kaddr;
3817
3818 /* Determine the actual number of bytes per element */
3819 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3820
3821 scatterlist = sglist->scatterlist;
3822
3823 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3824 struct page *page = sg_page(&scatterlist[i]);
3825
3826 kaddr = kmap(page);
1da177e4 3827 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3828 kunmap(page);
3829
3830 scatterlist[i].length = bsize_elem;
3831
3832 if (result != 0) {
3833 ipr_trace;
3834 return result;
3835 }
3836 }
3837
3838 if (len % bsize_elem) {
3839 struct page *page = sg_page(&scatterlist[i]);
3840
3841 kaddr = kmap(page);
1da177e4 3842 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3843 kunmap(page);
3844
3845 scatterlist[i].length = len % bsize_elem;
3846 }
3847
3848 sglist->buffer_len = len;
3849 return result;
3850}
3851
3852/**
3853 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3854 * @ipr_cmd: ipr command struct
3855 * @sglist: scatter/gather list
3856 *
3857 * Builds a microcode download IOA data list (IOADL).
3858 *
3859 **/
3860static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3861 struct ipr_sglist *sglist)
3862{
3863 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3864 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3865 struct scatterlist *scatterlist = sglist->scatterlist;
3866 int i;
3867
3868 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3869 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3870 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3871
3872 ioarcb->ioadl_len =
3873 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3874 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3875 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3876 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3877 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3878 }
3879
3880 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3881}
3882
1da177e4 3883/**
12baa420 3884 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3885 * @ipr_cmd: ipr command struct
3886 * @sglist: scatter/gather list
1da177e4 3887 *
12baa420 3888 * Builds a microcode download IOA data list (IOADL).
1da177e4 3889 *
1da177e4 3890 **/
3891static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3892 struct ipr_sglist *sglist)
1da177e4 3893{
1da177e4 3894 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3895 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3896 struct scatterlist *scatterlist = sglist->scatterlist;
3897 int i;
3898
12baa420 3899 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3900 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3901 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3902
3903 ioarcb->ioadl_len =
3904 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3905
3906 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3907 ioadl[i].flags_and_data_len =
3908 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3909 ioadl[i].address =
3910 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3911 }
3912
3913 ioadl[i-1].flags_and_data_len |=
3914 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3915}
3916
3917/**
3918 * ipr_update_ioa_ucode - Update IOA's microcode
3919 * @ioa_cfg: ioa config struct
3920 * @sglist: scatter/gather list
3921 *
3922 * Initiate an adapter reset to update the IOA's microcode
3923 *
3924 * Return value:
3925 * 0 on success / -EIO on failure
3926 **/
3927static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3928 struct ipr_sglist *sglist)
3929{
3930 unsigned long lock_flags;
3931
3932 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3933 while (ioa_cfg->in_reset_reload) {
3934 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3935 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3936 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3937 }
3938
3939 if (ioa_cfg->ucode_sglist) {
3940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3941 dev_err(&ioa_cfg->pdev->dev,
3942 "Microcode download already in progress\n");
3943 return -EIO;
1da177e4 3944 }
12baa420 3945
3946 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3947 sglist->scatterlist, sglist->num_sg,
3948 DMA_TO_DEVICE);
3949
3950 if (!sglist->num_dma_sg) {
3951 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3952 dev_err(&ioa_cfg->pdev->dev,
3953 "Failed to map microcode download buffer!\n");
3954 return -EIO;
3955 }
3956
3957 ioa_cfg->ucode_sglist = sglist;
3958 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3959 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3960 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3961
3962 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3963 ioa_cfg->ucode_sglist = NULL;
3964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965 return 0;
3966}
3967
3968/**
3969 * ipr_store_update_fw - Update the firmware on the adapter
3970 * @dev: device struct
3971 * @buf: buffer
3972 * @count: buffer size
3973 *
3974 * This function will update the firmware on the adapter.
3975 *
3976 * Return value:
3977 * count on success / other on failure
3978 **/
3979static ssize_t ipr_store_update_fw(struct device *dev,
3980 struct device_attribute *attr,
3981 const char *buf, size_t count)
1da177e4 3982{
ee959b00 3983 struct Scsi_Host *shost = class_to_shost(dev);
3984 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3985 struct ipr_ucode_image_header *image_hdr;
3986 const struct firmware *fw_entry;
3987 struct ipr_sglist *sglist;
3988 char fname[100];
3989 char *src;
3990 int len, result, dnld_size;
3991
3992 if (!capable(CAP_SYS_ADMIN))
3993 return -EACCES;
3994
3995 len = snprintf(fname, 99, "%s", buf);
3996 fname[len-1] = '\0';
3997
203fa3fe 3998 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3999 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4000 return -EIO;
4001 }
4002
4003 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4004
4005 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4006 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4007 sglist = ipr_alloc_ucode_buffer(dnld_size);
4008
4009 if (!sglist) {
4010 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4011 release_firmware(fw_entry);
4012 return -ENOMEM;
4013 }
4014
4015 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4016
4017 if (result) {
4018 dev_err(&ioa_cfg->pdev->dev,
4019 "Microcode buffer copy to DMA buffer failed\n");
12baa420 4020 goto out;
4021 }
4022
4023 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4024
12baa420 4025 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 4026
4027 if (!result)
4028 result = count;
4029out:
4030 ipr_free_ucode_buffer(sglist);
4031 release_firmware(fw_entry);
12baa420 4032 return result;
4033}
4034
ee959b00 4035static struct device_attribute ipr_update_fw_attr = {
4036 .attr = {
4037 .name = "update_fw",
4038 .mode = S_IWUSR,
4039 },
4040 .store = ipr_store_update_fw
4041};
4042
4043/**
4044 * ipr_show_fw_type - Show the adapter's firmware type.
4045 * @dev: class device struct
4046 * @buf: buffer
4047 *
4048 * Return value:
4049 * number of bytes printed to buffer
4050 **/
4051static ssize_t ipr_show_fw_type(struct device *dev,
4052 struct device_attribute *attr, char *buf)
4053{
4054 struct Scsi_Host *shost = class_to_shost(dev);
4055 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4056 unsigned long lock_flags = 0;
4057 int len;
4058
4059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4060 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4062 return len;
4063}
4064
4065static struct device_attribute ipr_ioa_fw_type_attr = {
4066 .attr = {
4067 .name = "fw_type",
4068 .mode = S_IRUGO,
4069 },
4070 .show = ipr_show_fw_type
4071};
4072
ee959b00 4073static struct device_attribute *ipr_ioa_attrs[] = {
4074 &ipr_fw_version_attr,
4075 &ipr_log_level_attr,
4076 &ipr_diagnostics_attr,
f37eb54b 4077 &ipr_ioa_state_attr,
4078 &ipr_ioa_reset_attr,
4079 &ipr_update_fw_attr,
75576bb9 4080 &ipr_ioa_fw_type_attr,
b53d124a 4081 &ipr_iopoll_weight_attr,
4082 NULL,
4083};
4084
4085#ifdef CONFIG_SCSI_IPR_DUMP
4086/**
4087 * ipr_read_dump - Dump the adapter
2c3c8bea 4088 * @filp: open sysfs file
1da177e4 4089 * @kobj: kobject struct
91a69029 4090 * @bin_attr: bin_attribute struct
4091 * @buf: buffer
4092 * @off: offset
4093 * @count: buffer size
4094 *
4095 * Return value:
4096 * number of bytes printed to buffer
4097 **/
2c3c8bea 4098static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4099 struct bin_attribute *bin_attr,
4100 char *buf, loff_t off, size_t count)
1da177e4 4101{
ee959b00 4102 struct device *cdev = container_of(kobj, struct device, kobj);
4103 struct Scsi_Host *shost = class_to_shost(cdev);
4104 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4105 struct ipr_dump *dump;
4106 unsigned long lock_flags = 0;
4107 char *src;
4d4dd706 4108 int len, sdt_end;
4109 size_t rc = count;
4110
4111 if (!capable(CAP_SYS_ADMIN))
4112 return -EACCES;
4113
4114 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4115 dump = ioa_cfg->dump;
4116
4117 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4118 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4119 return 0;
4120 }
4121 kref_get(&dump->kref);
4122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4123
4124 if (off > dump->driver_dump.hdr.len) {
4125 kref_put(&dump->kref, ipr_release_dump);
4126 return 0;
4127 }
4128
4129 if (off + count > dump->driver_dump.hdr.len) {
4130 count = dump->driver_dump.hdr.len - off;
4131 rc = count;
4132 }
4133
4134 if (count && off < sizeof(dump->driver_dump)) {
4135 if (off + count > sizeof(dump->driver_dump))
4136 len = sizeof(dump->driver_dump) - off;
4137 else
4138 len = count;
4139 src = (u8 *)&dump->driver_dump + off;
4140 memcpy(buf, src, len);
4141 buf += len;
4142 off += len;
4143 count -= len;
4144 }
4145
4146 off -= sizeof(dump->driver_dump);
4147
4148 if (ioa_cfg->sis64)
4149 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4150 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4151 sizeof(struct ipr_sdt_entry));
4152 else
4153 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4154 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4155
4156 if (count && off < sdt_end) {
4157 if (off + count > sdt_end)
4158 len = sdt_end - off;
4159 else
4160 len = count;
4161 src = (u8 *)&dump->ioa_dump + off;
4162 memcpy(buf, src, len);
4163 buf += len;
4164 off += len;
4165 count -= len;
4166 }
4167
4d4dd706 4168 off -= sdt_end;
4169
4170 while (count) {
4171 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4172 len = PAGE_ALIGN(off) - off;
4173 else
4174 len = count;
4175 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4176 src += off & ~PAGE_MASK;
4177 memcpy(buf, src, len);
4178 buf += len;
4179 off += len;
4180 count -= len;
4181 }
4182
4183 kref_put(&dump->kref, ipr_release_dump);
4184 return rc;
4185}
4186
4187/**
4188 * ipr_alloc_dump - Prepare for adapter dump
4189 * @ioa_cfg: ioa config struct
4190 *
4191 * Return value:
4192 * 0 on success / other on failure
4193 **/
4194static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4195{
4196 struct ipr_dump *dump;
4d4dd706 4197 __be32 **ioa_data;
4198 unsigned long lock_flags = 0;
4199
0bc42e35 4200 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4201
4202 if (!dump) {
4203 ipr_err("Dump memory allocation failed\n");
4204 return -ENOMEM;
4205 }
4206
4207 if (ioa_cfg->sis64)
4208 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4209 else
4210 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4211
4212 if (!ioa_data) {
4213 ipr_err("Dump memory allocation failed\n");
4214 kfree(dump);
4215 return -ENOMEM;
4216 }
4217
4218 dump->ioa_dump.ioa_data = ioa_data;
4219
4220 kref_init(&dump->kref);
4221 dump->ioa_cfg = ioa_cfg;
4222
4223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4224
4225 if (INACTIVE != ioa_cfg->sdt_state) {
4226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 4227 vfree(dump->ioa_dump.ioa_data);
4228 kfree(dump);
4229 return 0;
4230 }
4231
4232 ioa_cfg->dump = dump;
4233 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
56d6aa33 4234 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4235 ioa_cfg->dump_taken = 1;
4236 schedule_work(&ioa_cfg->work_q);
4237 }
4238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4239
4240 return 0;
4241}
4242
4243/**
4244 * ipr_free_dump - Free adapter dump memory
4245 * @ioa_cfg: ioa config struct
4246 *
4247 * Return value:
4248 * 0 on success / other on failure
4249 **/
4250static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4251{
4252 struct ipr_dump *dump;
4253 unsigned long lock_flags = 0;
4254
4255 ENTER;
4256
4257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4258 dump = ioa_cfg->dump;
4259 if (!dump) {
4260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4261 return 0;
4262 }
4263
4264 ioa_cfg->dump = NULL;
4265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4266
4267 kref_put(&dump->kref, ipr_release_dump);
4268
4269 LEAVE;
4270 return 0;
4271}
4272
4273/**
4274 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4275 * @filp: open sysfs file
1da177e4 4276 * @kobj: kobject struct
91a69029 4277 * @bin_attr: bin_attribute struct
4278 * @buf: buffer
4279 * @off: offset
4280 * @count: buffer size
4281 *
4282 * Return value:
4283 * length of buffer consumed
4284 **/
2c3c8bea 4285static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4286 struct bin_attribute *bin_attr,
4287 char *buf, loff_t off, size_t count)
1da177e4 4288{
ee959b00 4289 struct device *cdev = container_of(kobj, struct device, kobj);
4290 struct Scsi_Host *shost = class_to_shost(cdev);
4291 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4292 int rc;
4293
4294 if (!capable(CAP_SYS_ADMIN))
4295 return -EACCES;
4296
4297 if (buf[0] == '1')
4298 rc = ipr_alloc_dump(ioa_cfg);
4299 else if (buf[0] == '0')
4300 rc = ipr_free_dump(ioa_cfg);
4301 else
4302 return -EINVAL;
4303
4304 if (rc)
4305 return rc;
4306 else
4307 return count;
4308}
4309
4310static struct bin_attribute ipr_dump_attr = {
4311 .attr = {
4312 .name = "dump",
4313 .mode = S_IRUSR | S_IWUSR,
4314 },
4315 .size = 0,
4316 .read = ipr_read_dump,
4317 .write = ipr_write_dump
4318};
4319#else
4320static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4321#endif
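/*
 * Illustrative userspace sketch of the dump lifecycle implemented above
 * (the sysfs path and host number are assumptions): write '1' to allocate
 * and arm the dump, read back the binary contents once the driver has
 * reached DUMP_OBTAINED, then write '0' to release the dump memory.
 */
#include <stdio.h>

static int ipr_ex_set_dump(const char *path, char c)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputc(c, f);
	return fclose(f);
}

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/dump";	/* hypothetical */
	char buf[4096];
	size_t n;
	FILE *f;

	if (ipr_ex_set_dump(path, '1'))		/* prepare for a dump */
		return 1;
	f = fopen(path, "rb");			/* reads return data only once obtained */
	if (f) {
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);
	}
	return ipr_ex_set_dump(path, '0') ? 1 : 0;	/* free the dump buffers */
}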
4322
4323/**
4324 * ipr_change_queue_depth - Change the device's queue depth
4325 * @sdev: scsi device struct
4326 * @qdepth: depth to set
4328 *
4329 * Return value:
4330 * actual depth set
4331 **/
db5ed4df 4332static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
1da177e4 4333{
4334 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4335 struct ipr_resource_entry *res;
4336 unsigned long lock_flags = 0;
4337
4338 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4339 res = (struct ipr_resource_entry *)sdev->hostdata;
4340
4341 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4342 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4344
db5ed4df 4345 scsi_change_queue_depth(sdev, qdepth);
4346 return sdev->queue_depth;
4347}
4348
4349/**
4350 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4351 * @dev: device struct
46d74563 4352 * @attr: device attribute structure
4353 * @buf: buffer
4354 *
4355 * Return value:
4356 * number of bytes printed to buffer
4357 **/
10523b3b 4358static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4359{
4360 struct scsi_device *sdev = to_scsi_device(dev);
4361 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4362 struct ipr_resource_entry *res;
4363 unsigned long lock_flags = 0;
4364 ssize_t len = -ENXIO;
4365
4366 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4367 res = (struct ipr_resource_entry *)sdev->hostdata;
4368 if (res)
3e7ebdfa 4369 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4370 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4371 return len;
4372}
4373
4374static struct device_attribute ipr_adapter_handle_attr = {
4375 .attr = {
4376 .name = "adapter_handle",
4377 .mode = S_IRUSR,
4378 },
4379 .show = ipr_show_adapter_handle
4380};
4381
3e7ebdfa 4382/**
4383 * ipr_show_resource_path - Show the resource path or the resource address for
4384 * this device.
3e7ebdfa 4385 * @dev: device struct
46d74563 4386 * @attr: device attribute structure
4387 * @buf: buffer
4388 *
4389 * Return value:
4390 * number of bytes printed to buffer
4391 **/
4392static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4393{
4394 struct scsi_device *sdev = to_scsi_device(dev);
4395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4396 struct ipr_resource_entry *res;
4397 unsigned long lock_flags = 0;
4398 ssize_t len = -ENXIO;
4399 char buffer[IPR_MAX_RES_PATH_LENGTH];
4400
4401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4402 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4403 if (res && ioa_cfg->sis64)
3e7ebdfa 4404 len = snprintf(buf, PAGE_SIZE, "%s\n",
4405 __ipr_format_res_path(res->res_path, buffer,
4406 sizeof(buffer)));
4407 else if (res)
4408 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4409 res->bus, res->target, res->lun);
4410
4411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4412 return len;
4413}
4414
4415static struct device_attribute ipr_resource_path_attr = {
4416 .attr = {
4417 .name = "resource_path",
75576bb9 4418 .mode = S_IRUGO,
4419 },
4420 .show = ipr_show_resource_path
4421};
4422
4423/**
4424 * ipr_show_device_id - Show the device_id for this device.
4425 * @dev: device struct
4426 * @attr: device attribute structure
4427 * @buf: buffer
4428 *
4429 * Return value:
4430 * number of bytes printed to buffer
4431 **/
4432static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4433{
4434 struct scsi_device *sdev = to_scsi_device(dev);
4435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4436 struct ipr_resource_entry *res;
4437 unsigned long lock_flags = 0;
4438 ssize_t len = -ENXIO;
4439
4440 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4441 res = (struct ipr_resource_entry *)sdev->hostdata;
4442 if (res && ioa_cfg->sis64)
4443 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4444 else if (res)
4445 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4446
4447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4448 return len;
4449}
4450
4451static struct device_attribute ipr_device_id_attr = {
4452 .attr = {
4453 .name = "device_id",
4454 .mode = S_IRUGO,
4455 },
4456 .show = ipr_show_device_id
4457};
4458
4459/**
4460 * ipr_show_resource_type - Show the resource type for this device.
4461 * @dev: device struct
46d74563 4462 * @attr: device attribute structure
4463 * @buf: buffer
4464 *
4465 * Return value:
4466 * number of bytes printed to buffer
4467 **/
4468static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4469{
4470 struct scsi_device *sdev = to_scsi_device(dev);
4471 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4472 struct ipr_resource_entry *res;
4473 unsigned long lock_flags = 0;
4474 ssize_t len = -ENXIO;
4475
4476 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4477 res = (struct ipr_resource_entry *)sdev->hostdata;
4478
4479 if (res)
4480 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4481
4482 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4483 return len;
4484}
4485
4486static struct device_attribute ipr_resource_type_attr = {
4487 .attr = {
4488 .name = "resource_type",
4489 .mode = S_IRUGO,
4490 },
4491 .show = ipr_show_resource_type
4492};
4493
4494static struct device_attribute *ipr_dev_attrs[] = {
4495 &ipr_adapter_handle_attr,
3e7ebdfa 4496 &ipr_resource_path_attr,
46d74563 4497 &ipr_device_id_attr,
75576bb9 4498 &ipr_resource_type_attr,
4499 NULL,
4500};
4501
4502/**
4503 * ipr_biosparam - Return the HSC mapping
4504 * @sdev: scsi device struct
4505 * @block_device: block device pointer
4506 * @capacity: capacity of the device
4507 * @parm: Array containing returned HSC values.
4508 *
4509 * This function generates the HSC parms that fdisk uses.
4510 * We want to make sure we return something that places partitions
4511 * on 4k boundaries for best performance with the IOA.
4512 *
4513 * Return value:
4514 * 0 on success
4515 **/
4516static int ipr_biosparam(struct scsi_device *sdev,
4517 struct block_device *block_device,
4518 sector_t capacity, int *parm)
4519{
4520 int heads, sectors;
4521 sector_t cylinders;
4522
4523 heads = 128;
4524 sectors = 32;
4525
4526 cylinders = capacity;
4527 sector_div(cylinders, (128 * 32));
4528
4529 /* return result */
4530 parm[0] = heads;
4531 parm[1] = sectors;
4532 parm[2] = cylinders;
4533
4534 return 0;
4535}
4536
4537/**
4538 * ipr_find_starget - Find target based on bus/target.
4539 * @starget: scsi target struct
4540 *
4541 * Return value:
4542 * resource entry pointer if found / NULL if not found
4543 **/
4544static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4545{
4546 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4547 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4548 struct ipr_resource_entry *res;
4549
4550 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4551 if ((res->bus == starget->channel) &&
0ee1d714 4552 (res->target == starget->id)) {
4553 return res;
4554 }
4555 }
4556
4557 return NULL;
4558}
4559
4560static struct ata_port_info sata_port_info;
4561
4562/**
4563 * ipr_target_alloc - Prepare for commands to a SCSI target
4564 * @starget: scsi target struct
4565 *
4566 * If the device is a SATA device, this function allocates an
4567 * ATA port with libata, else it does nothing.
4568 *
4569 * Return value:
4570 * 0 on success / non-0 on failure
4571 **/
4572static int ipr_target_alloc(struct scsi_target *starget)
4573{
4574 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4575 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4576 struct ipr_sata_port *sata_port;
4577 struct ata_port *ap;
4578 struct ipr_resource_entry *res;
4579 unsigned long lock_flags;
4580
4581 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4582 res = ipr_find_starget(starget);
4583 starget->hostdata = NULL;
4584
4585 if (res && ipr_is_gata(res)) {
4586 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4587 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4588 if (!sata_port)
4589 return -ENOMEM;
4590
4591 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4592 if (ap) {
4593 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4594 sata_port->ioa_cfg = ioa_cfg;
4595 sata_port->ap = ap;
4596 sata_port->res = res;
4597
4598 res->sata_port = sata_port;
4599 ap->private_data = sata_port;
4600 starget->hostdata = sata_port;
4601 } else {
4602 kfree(sata_port);
4603 return -ENOMEM;
4604 }
4605 }
4606 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4607
4608 return 0;
4609}
4610
4611/**
4612 * ipr_target_destroy - Destroy a SCSI target
4613 * @starget: scsi target struct
4614 *
4615 * If the device was a SATA device, this function frees the libata
4616 * ATA port, else it does nothing.
4617 *
4618 **/
4619static void ipr_target_destroy(struct scsi_target *starget)
4620{
4621 struct ipr_sata_port *sata_port = starget->hostdata;
4622 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4623 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4624
4625 if (ioa_cfg->sis64) {
4626 if (!ipr_find_starget(starget)) {
4627 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4628 clear_bit(starget->id, ioa_cfg->array_ids);
4629 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4630 clear_bit(starget->id, ioa_cfg->vset_ids);
4631 else if (starget->channel == 0)
4632 clear_bit(starget->id, ioa_cfg->target_ids);
4633 }
3e7ebdfa 4634 }
4635
4636 if (sata_port) {
4637 starget->hostdata = NULL;
4638 ata_sas_port_destroy(sata_port->ap);
4639 kfree(sata_port);
4640 }
4641}
4642
4643/**
4644 * ipr_find_sdev - Find device based on bus/target/lun.
4645 * @sdev: scsi device struct
4646 *
4647 * Return value:
4648 * resource entry pointer if found / NULL if not found
4649 **/
4650static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4651{
4652 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4653 struct ipr_resource_entry *res;
4654
4655 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4656 if ((res->bus == sdev->channel) &&
4657 (res->target == sdev->id) &&
4658 (res->lun == sdev->lun))
4659 return res;
4660 }
4661
4662 return NULL;
4663}
4664
4665/**
4666 * ipr_slave_destroy - Unconfigure a SCSI device
4667 * @sdev: scsi device struct
4668 *
4669 * Return value:
4670 * nothing
4671 **/
4672static void ipr_slave_destroy(struct scsi_device *sdev)
4673{
4674 struct ipr_resource_entry *res;
4675 struct ipr_ioa_cfg *ioa_cfg;
4676 unsigned long lock_flags = 0;
4677
4678 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4679
4680 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4681 res = (struct ipr_resource_entry *) sdev->hostdata;
4682 if (res) {
35a39691 4683 if (res->sata_port)
3e4ec344 4684 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4685 sdev->hostdata = NULL;
4686 res->sdev = NULL;
35a39691 4687 res->sata_port = NULL;
4688 }
4689 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4690}
4691
4692/**
4693 * ipr_slave_configure - Configure a SCSI device
4694 * @sdev: scsi device struct
4695 *
4696 * This function configures the specified scsi device.
4697 *
4698 * Return value:
4699 * 0 on success
4700 **/
4701static int ipr_slave_configure(struct scsi_device *sdev)
4702{
4703 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4704 struct ipr_resource_entry *res;
dd406ef8 4705 struct ata_port *ap = NULL;
1da177e4 4706 unsigned long lock_flags = 0;
3e7ebdfa 4707 char buffer[IPR_MAX_RES_PATH_LENGTH];
4708
4709 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4710 res = sdev->hostdata;
4711 if (res) {
4712 if (ipr_is_af_dasd_device(res))
4713 sdev->type = TYPE_RAID;
0726ce26 4714 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4715 sdev->scsi_level = 4;
4716 sdev->no_uld_attach = 1;
4717 }
1da177e4 4718 if (ipr_is_vset_device(res)) {
60654e25 4719 sdev->scsi_level = SCSI_SPC_3;
4720 blk_queue_rq_timeout(sdev->request_queue,
4721 IPR_VSET_RW_TIMEOUT);
086fa5ff 4722 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4723 }
4724 if (ipr_is_gata(res) && res->sata_port)
4725 ap = res->sata_port->ap;
4726 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4727
4728 if (ap) {
db5ed4df 4729 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8 4730 ata_sas_slave_configure(sdev, ap);
4731 }
4732
4733 if (ioa_cfg->sis64)
4734 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4735 ipr_format_res_path(ioa_cfg,
4736 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4737 return 0;
4738 }
4739 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4740 return 0;
4741}
4742
4743/**
4744 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4745 * @sdev: scsi device struct
4746 *
4747 * This function initializes an ATA port so that future commands
4748 * sent through queuecommand will work.
4749 *
4750 * Return value:
4751 * 0 on success
4752 **/
4753static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4754{
4755 struct ipr_sata_port *sata_port = NULL;
4756 int rc = -ENXIO;
4757
4758 ENTER;
4759 if (sdev->sdev_target)
4760 sata_port = sdev->sdev_target->hostdata;
b2024459 4761 if (sata_port) {
35a39691 4762 rc = ata_sas_port_init(sata_port->ap);
4763 if (rc == 0)
4764 rc = ata_sas_sync_probe(sata_port->ap);
4765 }
4766
4767 if (rc)
4768 ipr_slave_destroy(sdev);
4769
4770 LEAVE;
4771 return rc;
4772}
4773
4774/**
4775 * ipr_slave_alloc - Prepare for commands to a device.
4776 * @sdev: scsi device struct
4777 *
4778 * This function saves a pointer to the resource entry
4779 * in the scsi device struct if the device exists. We
4780 * can then use this pointer in ipr_queuecommand when
4781 * handling new commands.
4782 *
4783 * Return value:
692aebfc 4784 * 0 on success / -ENXIO if device does not exist
4785 **/
4786static int ipr_slave_alloc(struct scsi_device *sdev)
4787{
4788 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4789 struct ipr_resource_entry *res;
4790 unsigned long lock_flags;
692aebfc 4791 int rc = -ENXIO;
4792
4793 sdev->hostdata = NULL;
4794
4795 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4796
4797 res = ipr_find_sdev(sdev);
4798 if (res) {
4799 res->sdev = sdev;
4800 res->add_to_ml = 0;
4801 res->in_erp = 0;
4802 sdev->hostdata = res;
4803 if (!ipr_is_naca_model(res))
4804 res->needs_sync_complete = 1;
4805 rc = 0;
4806 if (ipr_is_gata(res)) {
4807 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4808 return ipr_ata_slave_alloc(sdev);
4809 }
4810 }
4811
4812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4813
692aebfc 4814 return rc;
4815}
4816
4817/**
4818 * ipr_match_lun - Match function for specified LUN
4819 * @ipr_cmd: ipr command struct
4820 * @device: device to match (sdev)
4821 *
4822 * Returns:
4823 * 1 if command matches sdev / 0 if command does not match sdev
4824 **/
4825static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4826{
4827 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4828 return 1;
4829 return 0;
4830}
4831
4832/**
4833 * ipr_wait_for_ops - Wait for matching commands to complete
4834 * @ioa_cfg: ioa config struct
4835 * @device: device to match (sdev)
4836 * @match: match function to use
4837 *
4838 * Returns:
4839 * SUCCESS / FAILED
4840 **/
4841static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4842 int (*match)(struct ipr_cmnd *, void *))
4843{
4844 struct ipr_cmnd *ipr_cmd;
4845 int wait;
4846 unsigned long flags;
4847 struct ipr_hrr_queue *hrrq;
4848 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4849 DECLARE_COMPLETION_ONSTACK(comp);
4850
4851 ENTER;
4852 do {
4853 wait = 0;
4854
4855 for_each_hrrq(hrrq, ioa_cfg) {
4856 spin_lock_irqsave(hrrq->lock, flags);
4857 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4858 if (match(ipr_cmd, device)) {
4859 ipr_cmd->eh_comp = &comp;
4860 wait++;
4861 }
4862 }
4863 spin_unlock_irqrestore(hrrq->lock, flags);
4864 }
4865
4866 if (wait) {
4867 timeout = wait_for_completion_timeout(&comp, timeout);
4868
4869 if (!timeout) {
4870 wait = 0;
4871
4872 for_each_hrrq(hrrq, ioa_cfg) {
4873 spin_lock_irqsave(hrrq->lock, flags);
4874 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4875 if (match(ipr_cmd, device)) {
4876 ipr_cmd->eh_comp = NULL;
4877 wait++;
4878 }
4879 }
4880 spin_unlock_irqrestore(hrrq->lock, flags);
4881 }
4882
4883 if (wait)
4884 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4885 LEAVE;
4886 return wait ? FAILED : SUCCESS;
4887 }
4888 }
4889 } while (wait);
4890
4891 LEAVE;
4892 return SUCCESS;
4893}
4894
70233ac5 4895static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4896{
4897 struct ipr_ioa_cfg *ioa_cfg;
70233ac5 4898 unsigned long lock_flags = 0;
4899 int rc = SUCCESS;
4900
4901 ENTER;
70233ac5 4902 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4903 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 4904
96b04db9 4905 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
70233ac5 4906 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4907 dev_err(&ioa_cfg->pdev->dev,
4908 "Adapter being reset as a result of error recovery.\n");
1da177e4 4909
4910 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4911 ioa_cfg->sdt_state = GET_DUMP;
4912 }
1da177e4 4913
70233ac5 4914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4915 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4916 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
df0ae249 4917
70233ac5 4918 /* If we got hit with a host reset while we were already resetting
4919 the adapter for some reason, and the reset failed, fail the host reset as well. */
4920 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4921 ipr_trace;
4922 rc = FAILED;
4923 }
df0ae249 4924
70233ac5 4925 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4926 LEAVE;
4927 return rc;
4928}
4929
4930/**
4931 * ipr_device_reset - Reset the device
4932 * @ioa_cfg: ioa config struct
4933 * @res: resource entry struct
4934 *
4935 * This function issues a device reset to the affected device.
4936 * If the device is a SCSI device, a LUN reset will be sent
4937 * to the device first. If that does not work, a target reset
4938 * will be sent. If the device is a SATA device, a PHY reset will
4939 * be sent.
4940 *
4941 * Return value:
4942 * 0 on success / non-zero on failure
4943 **/
4944static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4945 struct ipr_resource_entry *res)
4946{
4947 struct ipr_cmnd *ipr_cmd;
4948 struct ipr_ioarcb *ioarcb;
4949 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4950 struct ipr_ioarcb_ata_regs *regs;
4951 u32 ioasc;
4952
4953 ENTER;
4954 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4955 ioarcb = &ipr_cmd->ioarcb;
4956 cmd_pkt = &ioarcb->cmd_pkt;
4957
4958 if (ipr_cmd->ioa_cfg->sis64) {
4959 regs = &ipr_cmd->i.ata_ioadl.regs;
4960 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4961 } else
4962 regs = &ioarcb->u.add_data.u.regs;
c6513096 4963
3e7ebdfa 4964 ioarcb->res_handle = res->res_handle;
4965 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4966 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4967 if (ipr_is_gata(res)) {
4968 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4969 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4970 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4971 }
4972
4973 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 4974 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 4975 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4976 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4977 if (ipr_cmd->ioa_cfg->sis64)
4978 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4979 sizeof(struct ipr_ioasa_gata));
4980 else
4981 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4982 sizeof(struct ipr_ioasa_gata));
4983 }
4984
4985 LEAVE;
203fa3fe 4986 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4987}
4988
4989/**
4990 * ipr_sata_reset - Reset the SATA port
cc0680a5 4991 * @link: SATA link to reset
4992 * @classes: class of the attached device
 * @deadline: reset deadline (unused by this handler)
4993 *
cc0680a5 4994 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
4995 *
4996 * Return value:
4997 * 0 on success / non-zero on failure
4998 **/
cc0680a5 4999static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 5000 unsigned long deadline)
35a39691 5001{
cc0680a5 5002 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
5003 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5004 struct ipr_resource_entry *res;
5005 unsigned long lock_flags = 0;
5006 int rc = -ENXIO;
5007
5008 ENTER;
5009 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 5010 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
5011 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5012 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5013 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5014 }
5015
35a39691
BK
5016 res = sata_port->res;
5017 if (res) {
5018 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 5019 *classes = res->ata_class;
35a39691
BK
5020 }
5021
5022 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5023 LEAVE;
5024 return rc;
5025}
5026
1da177e4
LT
5027/**
5028 * __ipr_eh_dev_reset - Reset the device
5029 * @scsi_cmd: scsi command struct
5030 *
5031 * This function issues a device reset to the affected device.
5032 * A LUN reset will be sent to the device first. If that does
5033 * not work, a target reset will be sent.
5034 *
5035 * Return value:
5036 * SUCCESS / FAILED
5037 **/
203fa3fe 5038static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5039{
5040 struct ipr_cmnd *ipr_cmd;
5041 struct ipr_ioa_cfg *ioa_cfg;
5042 struct ipr_resource_entry *res;
35a39691
BK
5043 struct ata_port *ap;
5044 int rc = 0;
05a6538a 5045 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5046
5047 ENTER;
5048 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5049 res = scsi_cmd->device->hostdata;
5050
eeb88307 5051 if (!res)
1da177e4
LT
5052 return FAILED;
5053
5054 /*
5055 * If we are currently going through reset/reload, return failed. This will force the
5056 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5057 * reset to complete
5058 */
5059 if (ioa_cfg->in_reset_reload)
5060 return FAILED;
56d6aa33 5061 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
5062 return FAILED;
5063
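	/*
	 * Any ops still pending for this device will be completed by the
	 * upcoming reset, so point their done handlers at the eh variants
	 * (and mark queued ATA commands as failed) before resetting.
	 */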
05a6538a 5064 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5065 spin_lock(&hrrq->_lock);
05a6538a 5066 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5067 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5068 if (ipr_cmd->scsi_cmd)
5069 ipr_cmd->done = ipr_scsi_eh_done;
5070 if (ipr_cmd->qc)
5071 ipr_cmd->done = ipr_sata_eh_done;
5072 if (ipr_cmd->qc &&
5073 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5074 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5075 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5076 }
7402ecef 5077 }
1da177e4 5078 }
56d6aa33 5079 spin_unlock(&hrrq->_lock);
1da177e4 5080 }
1da177e4 5081 res->resetting_device = 1;
fb3ed3cb 5082 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
5083
5084 if (ipr_is_gata(res) && res->sata_port) {
5085 ap = res->sata_port->ap;
5086 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 5087 ata_std_error_handler(ap);
35a39691 5088 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26 5089
05a6538a 5090 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5091 spin_lock(&hrrq->_lock);
05a6538a 5092 list_for_each_entry(ipr_cmd,
5093 &hrrq->hrrq_pending_q, queue) {
5094 if (ipr_cmd->ioarcb.res_handle ==
5095 res->res_handle) {
5096 rc = -EIO;
5097 break;
5098 }
5af23d26 5099 }
56d6aa33 5100 spin_unlock(&hrrq->_lock);
5af23d26 5101 }
35a39691
BK
5102 } else
5103 rc = ipr_device_reset(ioa_cfg, res);
1da177e4 5104 res->resetting_device = 0;
0b1f8d44 5105 res->reset_occurred = 1;
1da177e4 5106
1da177e4 5107 LEAVE;
203fa3fe 5108 return rc ? FAILED : SUCCESS;
1da177e4
LT
5109}
5110
203fa3fe 5111static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
94d0e7b8
JG
5112{
5113 int rc;
6cdb0817
BK
5114 struct ipr_ioa_cfg *ioa_cfg;
5115
5116 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
94d0e7b8
JG
5117
5118 spin_lock_irq(cmd->device->host->host_lock);
5119 rc = __ipr_eh_dev_reset(cmd);
5120 spin_unlock_irq(cmd->device->host->host_lock);
5121
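	/*
	 * The reset itself is issued under the host lock; once it succeeds,
	 * drop the lock and wait for any ops still outstanding to this LUN
	 * to come back before reporting SUCCESS to the error handler.
	 */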
6cdb0817
BK
5122 if (rc == SUCCESS)
5123 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5124
94d0e7b8
JG
5125 return rc;
5126}
5127
1da177e4
LT
5128/**
5129 * ipr_bus_reset_done - Op done function for bus reset.
5130 * @ipr_cmd: ipr command struct
5131 *
5132 * This function is the op done function for a bus reset
5133 *
5134 * Return value:
5135 * none
5136 **/
5137static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5138{
5139 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5140 struct ipr_resource_entry *res;
5141
5142 ENTER;
3e7ebdfa
WB
5143 if (!ioa_cfg->sis64)
5144 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5145 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5146 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5147 break;
5148 }
1da177e4 5149 }
1da177e4
LT
5150
5151 /*
5152 * If abort has not completed, indicate the reset has, else call the
5153 * abort's done function to wake the sleeping eh thread
5154 */
5155 if (ipr_cmd->sibling->sibling)
5156 ipr_cmd->sibling->sibling = NULL;
5157 else
5158 ipr_cmd->sibling->done(ipr_cmd->sibling);
5159
05a6538a 5160 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5161 LEAVE;
5162}
5163
5164/**
5165 * ipr_abort_timeout - An abort task has timed out
5166 * @ipr_cmd: ipr command struct
5167 *
5168 * This function handles when an abort task times out. If this
5169 * happens we issue a bus reset since we have resources tied
5170 * up that must be freed before returning to the midlayer.
5171 *
5172 * Return value:
5173 * none
5174 **/
5175static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5176{
5177 struct ipr_cmnd *reset_cmd;
5178 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5179 struct ipr_cmd_pkt *cmd_pkt;
5180 unsigned long lock_flags = 0;
5181
5182 ENTER;
5183 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5184 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5185 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5186 return;
5187 }
5188
fb3ed3cb 5189 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
5190 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5191 ipr_cmd->sibling = reset_cmd;
5192 reset_cmd->sibling = ipr_cmd;
5193 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5194 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5195 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5196 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5197 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5198
5199 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5201 LEAVE;
5202}
5203
5204/**
5205 * ipr_cancel_op - Cancel specified op
5206 * @scsi_cmd: scsi command struct
5207 *
5208 * This function cancels specified op.
5209 *
5210 * Return value:
5211 * SUCCESS / FAILED
5212 **/
203fa3fe 5213static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5214{
5215 struct ipr_cmnd *ipr_cmd;
5216 struct ipr_ioa_cfg *ioa_cfg;
5217 struct ipr_resource_entry *res;
5218 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5219 u32 ioasc, int_reg;
1da177e4 5220 int op_found = 0;
05a6538a 5221 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5222
5223 ENTER;
5224 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5225 res = scsi_cmd->device->hostdata;
5226
8fa728a2
JG
5227 /* If we are currently going through reset/reload, return failed.
5228 * This will force the mid-layer to call ipr_eh_host_reset,
5229 * which will then go to sleep and wait for the reset to complete
5230 */
56d6aa33 5231 if (ioa_cfg->in_reset_reload ||
5232 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5233 return FAILED;
a92fa25c
KSS
5234 if (!res)
5235 return FAILED;
5236
5237 /*
5238 * If we are aborting a timed out op, chances are that the timeout was caused
5239 * by a still not detected EEH error. In such cases, reading a register will
5240 * trigger the EEH recovery infrastructure.
5241 */
5242 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5243
5244 if (!ipr_is_gscsi(res))
1da177e4
LT
5245 return FAILED;
5246
05a6538a 5247 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5248 spin_lock(&hrrq->_lock);
05a6538a 5249 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5250 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5251 ipr_cmd->done = ipr_scsi_eh_done;
5252 op_found = 1;
5253 break;
5254 }
1da177e4 5255 }
56d6aa33 5256 spin_unlock(&hrrq->_lock);
1da177e4
LT
5257 }
5258
5259 if (!op_found)
5260 return SUCCESS;
5261
5262 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5263 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
5264 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5265 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5266 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5267 ipr_cmd->u.sdev = scsi_cmd->device;
5268
fb3ed3cb
BK
5269 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5270 scsi_cmd->cmnd[0]);
1da177e4 5271 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5272 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5273
5274 /*
5275 * If the abort task timed out and we sent a bus reset, we will get
5276 * one of the following responses to the abort
5277 */
5278 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5279 ioasc = 0;
5280 ipr_trace;
5281 }
5282
c4ee22a3 5283 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ee0a90fa
BK
5284 if (!ipr_is_naca_model(res))
5285 res->needs_sync_complete = 1;
1da177e4
LT
5286
5287 LEAVE;
203fa3fe 5288 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
1da177e4
LT
5289}
5290
5291/**
5292 * ipr_scan_finished - Report whether the initial device scan is done
 5293 * @shost: scsi host struct
 * @elapsed_time: elapsed time of the scan, in jiffies
5294 *
5295 * Return value:
f688f96d
BK
5296 * 0 if scan in progress / 1 if scan is complete
5297 **/
5298static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5299{
5300 unsigned long lock_flags;
5301 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5302 int rc = 0;
5303
5304 spin_lock_irqsave(shost->host_lock, lock_flags);
5305 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5306 rc = 1;
5307 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5308 rc = 1;
5309 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5310 return rc;
5311}
5312
5313/**
5314 * ipr_eh_abort - Abort a single op
5315 * @scsi_cmd: scsi command struct
5316 *
5317 * Return value:
1da177e4
LT
5318 * SUCCESS / FAILED
5319 **/
203fa3fe 5320static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5321{
8fa728a2
JG
5322 unsigned long flags;
5323 int rc;
6cdb0817 5324 struct ipr_ioa_cfg *ioa_cfg;
1da177e4
LT
5325
5326 ENTER;
1da177e4 5327
6cdb0817
BK
5328 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5329
8fa728a2
JG
5330 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5331 rc = ipr_cancel_op(scsi_cmd);
5332 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4 5333
6cdb0817
BK
5334 if (rc == SUCCESS)
5335 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
1da177e4 5336 LEAVE;
8fa728a2 5337 return rc;
1da177e4
LT
5338}
5339
5340/**
5341 * ipr_handle_other_interrupt - Handle "other" interrupts
5342 * @ioa_cfg: ioa config struct
634651fa 5343 * @int_reg: interrupt register
1da177e4
LT
5344 *
5345 * Return value:
5346 * IRQ_NONE / IRQ_HANDLED
5347 **/
634651fa 5348static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5349 u32 int_reg)
1da177e4
LT
5350{
5351 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5352 u32 int_mask_reg;
56d6aa33 5353
7dacb64f
WB
5354 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5355 int_reg &= ~int_mask_reg;
5356
5357 /* If an interrupt on the adapter did not occur, ignore it.
5358 * Or in the case of SIS 64, check for a stage change interrupt.
5359 */
5360 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5361 if (ioa_cfg->sis64) {
5362 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5363 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5364 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5365
5366 /* clear stage change */
5367 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5368 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5369 list_del(&ioa_cfg->reset_cmd->queue);
5370 del_timer(&ioa_cfg->reset_cmd->timer);
5371 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5372 return IRQ_HANDLED;
5373 }
5374 }
5375
5376 return IRQ_NONE;
5377 }
1da177e4
LT
5378
5379 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5380 /* Mask the interrupt */
5381 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5382
5383 /* Clear the interrupt */
5384 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5385 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5386
5387 list_del(&ioa_cfg->reset_cmd->queue);
5388 del_timer(&ioa_cfg->reset_cmd->timer);
5389 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5390 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
7dd21308
BK
5391 if (ioa_cfg->clear_isr) {
5392 if (ipr_debug && printk_ratelimit())
5393 dev_err(&ioa_cfg->pdev->dev,
5394 "Spurious interrupt detected. 0x%08X\n", int_reg);
5395 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5396 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5397 return IRQ_NONE;
5398 }
1da177e4
LT
5399 } else {
5400 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5401 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5402 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5403 dev_err(&ioa_cfg->pdev->dev,
5404 "No Host RRQ. 0x%08X\n", int_reg);
1da177e4
LT
5405 else
5406 dev_err(&ioa_cfg->pdev->dev,
5407 "Permanent IOA failure. 0x%08X\n", int_reg);
5408
5409 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5410 ioa_cfg->sdt_state = GET_DUMP;
5411
5412 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5413 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5414 }
56d6aa33 5415
1da177e4
LT
5416 return rc;
5417}
5418
3feeb89d
WB
5419/**
5420 * ipr_isr_eh - Interrupt service routine error handler
5421 * @ioa_cfg: ioa config struct
5422 * @msg: message to log
 * @number: numeric value logged along with the message
5423 *
5424 * Return value:
5425 * none
5426 **/
05a6538a 5427static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
3feeb89d
WB
5428{
5429 ioa_cfg->errors_logged++;
05a6538a 5430 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
3feeb89d
WB
5431
5432 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5433 ioa_cfg->sdt_state = GET_DUMP;
5434
5435 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5436}
5437
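/*
 * ipr_process_hrrq - Pull completed commands off one host RRQ and move
 * them to @doneq so they can be completed outside the hrrq lock.
 * A negative @budget means no limit (legacy interrupt path); the iopoll
 * path passes its polling budget. Returns the number of entries processed.
 */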
b53d124a 5438static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
05a6538a 5439 struct list_head *doneq)
5440{
5441 u32 ioasc;
5442 u16 cmd_index;
5443 struct ipr_cmnd *ipr_cmd;
5444 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5445 int num_hrrq = 0;
5446
5447 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5448 if (!hrr_queue->allow_interrupts)
05a6538a 5449 return 0;
5450
5451 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5452 hrr_queue->toggle_bit) {
5453
5454 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5455 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5456 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5457
5458 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5459 cmd_index < hrr_queue->min_cmd_id)) {
5460 ipr_isr_eh(ioa_cfg,
5461 "Invalid response handle from IOA: ",
5462 cmd_index);
5463 break;
5464 }
5465
5466 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5467 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5468
5469 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5470
5471 list_move_tail(&ipr_cmd->queue, doneq);
5472
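		/*
		 * Advance to the next response entry; when the queue wraps,
		 * flip our toggle bit so that entries written by the adapter
		 * on the next pass (with the inverted toggle) look new.
		 */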
5473 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5474 hrr_queue->hrrq_curr++;
5475 } else {
5476 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5477 hrr_queue->toggle_bit ^= 1u;
5478 }
5479 num_hrrq++;
b53d124a 5480 if (budget > 0 && num_hrrq >= budget)
5481 break;
05a6538a 5482 }
b53d124a 5483
05a6538a 5484 return num_hrrq;
5485}
b53d124a 5486
5487static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5488{
5489 struct ipr_ioa_cfg *ioa_cfg;
5490 struct ipr_hrr_queue *hrrq;
5491 struct ipr_cmnd *ipr_cmd, *temp;
5492 unsigned long hrrq_flags;
5493 int completed_ops;
5494 LIST_HEAD(doneq);
5495
5496 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5497 ioa_cfg = hrrq->ioa_cfg;
5498
5499 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5500 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5501
5502 if (completed_ops < budget)
5503 blk_iopoll_complete(iop);
5504 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5505
5506 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5507 list_del(&ipr_cmd->queue);
5508 del_timer(&ipr_cmd->timer);
5509 ipr_cmd->fast_done(ipr_cmd);
5510 }
5511
5512 return completed_ops;
5513}
5514
1da177e4
LT
5515/**
5516 * ipr_isr - Interrupt service routine
5517 * @irq: irq number
5518 * @devp: pointer to the hrr queue being serviced
1da177e4
LT
5519 *
5520 * Return value:
5521 * IRQ_NONE / IRQ_HANDLED
5522 **/
7d12e780 5523static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5524{
05a6538a 5525 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5526 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5527 unsigned long hrrq_flags = 0;
7dacb64f 5528 u32 int_reg = 0;
3feeb89d 5529 int num_hrrq = 0;
7dacb64f 5530 int irq_none = 0;
172cd6e1 5531 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5532 irqreturn_t rc = IRQ_NONE;
172cd6e1 5533 LIST_HEAD(doneq);
1da177e4 5534
56d6aa33 5535 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5536 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5537 if (!hrrq->allow_interrupts) {
5538 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4
LT
5539 return IRQ_NONE;
5540 }
5541
1da177e4 5542 while (1) {
b53d124a 5543 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5544 rc = IRQ_HANDLED;
1da177e4 5545
b53d124a 5546 if (!ioa_cfg->clear_isr)
5547 break;
7dd21308 5548
1da177e4 5549 /* Clear the PCI interrupt */
a5442ba4 5550 num_hrrq = 0;
3feeb89d 5551 do {
b53d124a 5552 writel(IPR_PCII_HRRQ_UPDATED,
5553 ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5554 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d 5555 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
b53d124a 5556 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
3feeb89d 5557
7dacb64f
WB
5558 } else if (rc == IRQ_NONE && irq_none == 0) {
5559 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5560 irq_none++;
a5442ba4
WB
5561 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5562 int_reg & IPR_PCII_HRRQ_UPDATED) {
b53d124a 5563 ipr_isr_eh(ioa_cfg,
5564 "Error clearing HRRQ: ", num_hrrq);
172cd6e1 5565 rc = IRQ_HANDLED;
b53d124a 5566 break;
1da177e4
LT
5567 } else
5568 break;
5569 }
5570
5571 if (unlikely(rc == IRQ_NONE))
634651fa 5572 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5573
56d6aa33 5574 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
172cd6e1
BK
5575 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5576 list_del(&ipr_cmd->queue);
5577 del_timer(&ipr_cmd->timer);
5578 ipr_cmd->fast_done(ipr_cmd);
5579 }
05a6538a 5580 return rc;
5581}
5582
5583/**
5584 * ipr_isr_mhrrq - Interrupt service routine for the secondary HRRQ vectors
 5585 * @irq: irq number
 5586 * @devp: pointer to the hrr queue being serviced
5587 *
5588 * Return value:
5589 * IRQ_NONE / IRQ_HANDLED
5590 **/
5591static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5592{
5593 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
b53d124a 5594 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5595 unsigned long hrrq_flags = 0;
05a6538a 5596 struct ipr_cmnd *ipr_cmd, *temp;
5597 irqreturn_t rc = IRQ_NONE;
5598 LIST_HEAD(doneq);
172cd6e1 5599
56d6aa33 5600 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5601
5602 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5603 if (!hrrq->allow_interrupts) {
5604 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5605 return IRQ_NONE;
5606 }
5607
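	/*
	 * With iopoll active (SIS64, multiple vectors), only peek at the
	 * toggle bit here and let blk_iopoll drain the queue; otherwise
	 * process the queue directly in the interrupt handler.
	 */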
89f8b33c 5608 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 5609 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5610 hrrq->toggle_bit) {
5611 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5612 blk_iopoll_sched(&hrrq->iopoll);
5613 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5614 return IRQ_HANDLED;
5615 }
5616 } else {
5617 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5618 hrrq->toggle_bit)
05a6538a 5619
b53d124a 5620 if (ipr_process_hrrq(hrrq, -1, &doneq))
5621 rc = IRQ_HANDLED;
5622 }
05a6538a 5623
56d6aa33 5624 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5625
5626 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5627 list_del(&ipr_cmd->queue);
5628 del_timer(&ipr_cmd->timer);
5629 ipr_cmd->fast_done(ipr_cmd);
5630 }
1da177e4
LT
5631 return rc;
5632}
5633
a32c055f
WB
5634/**
5635 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5636 * @ioa_cfg: ioa config struct
5637 * @ipr_cmd: ipr command struct
5638 *
5639 * Return value:
5640 * 0 on success / -1 on failure
5641 **/
5642static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5643 struct ipr_cmnd *ipr_cmd)
5644{
5645 int i, nseg;
5646 struct scatterlist *sg;
5647 u32 length;
5648 u32 ioadl_flags = 0;
5649 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5650 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5651 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5652
5653 length = scsi_bufflen(scsi_cmd);
5654 if (!length)
5655 return 0;
5656
5657 nseg = scsi_dma_map(scsi_cmd);
5658 if (nseg < 0) {
51f52a47 5659 if (printk_ratelimit())
d73341bf 5660 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
a32c055f
WB
5661 return -1;
5662 }
5663
5664 ipr_cmd->dma_use_sg = nseg;
5665
438b0331 5666 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5667 ioarcb->ioadl_len =
5668 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5669
a32c055f
WB
5670 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5671 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5672 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5673 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5674 ioadl_flags = IPR_IOADL_FLAGS_READ;
5675
5676 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5677 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5678 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5679 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5680 }
5681
5682 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5683 return 0;
5684}
5685
1da177e4
LT
5686/**
5687 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5688 * @ioa_cfg: ioa config struct
5689 * @ipr_cmd: ipr command struct
5690 *
5691 * Return value:
5692 * 0 on success / -1 on failure
5693 **/
5694static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5695 struct ipr_cmnd *ipr_cmd)
5696{
63015bc9
FT
5697 int i, nseg;
5698 struct scatterlist *sg;
1da177e4
LT
5699 u32 length;
5700 u32 ioadl_flags = 0;
5701 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5702 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5703 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5704
63015bc9
FT
5705 length = scsi_bufflen(scsi_cmd);
5706 if (!length)
1da177e4
LT
5707 return 0;
5708
63015bc9
FT
5709 nseg = scsi_dma_map(scsi_cmd);
5710 if (nseg < 0) {
d73341bf 5711 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
63015bc9
FT
5712 return -1;
5713 }
51b1c7e1 5714
63015bc9
FT
5715 ipr_cmd->dma_use_sg = nseg;
5716
5717 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5718 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5719 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5720 ioarcb->data_transfer_length = cpu_to_be32(length);
5721 ioarcb->ioadl_len =
63015bc9
FT
5722 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5723 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5724 ioadl_flags = IPR_IOADL_FLAGS_READ;
5725 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5726 ioarcb->read_ioadl_len =
5727 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5728 }
1da177e4 5729
a32c055f
WB
5730 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5731 ioadl = ioarcb->u.add_data.u.ioadl;
5732 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5733 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5734 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5735 }
1da177e4 5736
63015bc9
FT
5737 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5738 ioadl[i].flags_and_data_len =
5739 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5740 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5741 }
5742
63015bc9
FT
5743 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5744 return 0;
1da177e4
LT
5745}
5746
1da177e4
LT
5747/**
5748 * ipr_erp_done - Process completion of ERP for a device
5749 * @ipr_cmd: ipr command struct
5750 *
5751 * This function copies the sense buffer into the scsi_cmd
5752 * struct and calls the scsi_done function.
5753 *
5754 * Return value:
5755 * nothing
5756 **/
5757static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5758{
5759 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5760 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5761 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5762
5763 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5764 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5765 scmd_printk(KERN_ERR, scsi_cmd,
5766 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5767 } else {
5768 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5769 SCSI_SENSE_BUFFERSIZE);
5770 }
5771
5772 if (res) {
ee0a90fa
BK
5773 if (!ipr_is_naca_model(res))
5774 res->needs_sync_complete = 1;
1da177e4
LT
5775 res->in_erp = 0;
5776 }
63015bc9 5777 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5778 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5779 scsi_cmd->scsi_done(scsi_cmd);
5780}
5781
5782/**
5783 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5784 * @ipr_cmd: ipr command struct
5785 *
5786 * Return value:
5787 * none
5788 **/
5789static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5790{
51b1c7e1 5791 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5792 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5793 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5794
5795 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5796 ioarcb->data_transfer_length = 0;
1da177e4 5797 ioarcb->read_data_transfer_length = 0;
a32c055f 5798 ioarcb->ioadl_len = 0;
1da177e4 5799 ioarcb->read_ioadl_len = 0;
96d21f00
WB
5800 ioasa->hdr.ioasc = 0;
5801 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
5802
5803 if (ipr_cmd->ioa_cfg->sis64)
5804 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5805 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5806 else {
5807 ioarcb->write_ioadl_addr =
5808 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5809 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5810 }
1da177e4
LT
5811}
5812
5813/**
5814 * ipr_erp_request_sense - Send request sense to a device
5815 * @ipr_cmd: ipr command struct
5816 *
5817 * This function sends a request sense to a device as a result
5818 * of a check condition.
5819 *
5820 * Return value:
5821 * nothing
5822 **/
5823static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5824{
5825 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5826 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5827
5828 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5829 ipr_erp_done(ipr_cmd);
5830 return;
5831 }
5832
5833 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5834
5835 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5836 cmd_pkt->cdb[0] = REQUEST_SENSE;
5837 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5838 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5839 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5840 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5841
a32c055f
WB
5842 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5843 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5844
5845 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5846 IPR_REQUEST_SENSE_TIMEOUT * 2);
5847}
5848
5849/**
5850 * ipr_erp_cancel_all - Send cancel all to a device
5851 * @ipr_cmd: ipr command struct
5852 *
5853 * This function sends a cancel all to a device to clear the
5854 * queue. If we are running TCQ on the device, QERR is set to 1,
5855 * which means all outstanding ops have been dropped on the floor.
5856 * Cancel all will return them to us.
5857 *
5858 * Return value:
5859 * nothing
5860 **/
5861static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5862{
5863 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5864 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5865 struct ipr_cmd_pkt *cmd_pkt;
5866
5867 res->in_erp = 1;
5868
5869 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5870
17ea0126 5871 if (!scsi_cmd->device->simple_tags) {
1da177e4
LT
5872 ipr_erp_request_sense(ipr_cmd);
5873 return;
5874 }
5875
5876 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5877 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5878 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5879
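	/* The request sense is issued from the cancel all's done handler. */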
5880 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5881 IPR_CANCEL_ALL_TIMEOUT);
5882}
5883
5884/**
5885 * ipr_dump_ioasa - Dump contents of IOASA
5886 * @ioa_cfg: ioa config struct
5887 * @ipr_cmd: ipr command struct
fe964d0a 5888 * @res: resource entry struct
1da177e4
LT
5889 *
5890 * This function is invoked by the interrupt handler when ops
5891 * fail. It will log the IOASA if appropriate. Only called
5892 * for GPDD ops.
5893 *
5894 * Return value:
5895 * none
5896 **/
5897static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5898 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5899{
5900 int i;
5901 u16 data_len;
b0692dd4 5902 u32 ioasc, fd_ioasc;
96d21f00 5903 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
5904 __be32 *ioasa_data = (__be32 *)ioasa;
5905 int error_index;
5906
96d21f00
WB
5907 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5908 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5909
5910 if (0 == ioasc)
5911 return;
5912
5913 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5914 return;
5915
b0692dd4
BK
5916 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5917 error_index = ipr_get_error(fd_ioasc);
5918 else
5919 error_index = ipr_get_error(ioasc);
1da177e4
LT
5920
5921 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5922 /* Don't log an error if the IOA already logged one */
96d21f00 5923 if (ioasa->hdr.ilid != 0)
1da177e4
LT
5924 return;
5925
cc9bd5d4
BK
5926 if (!ipr_is_gscsi(res))
5927 return;
5928
1da177e4
LT
5929 if (ipr_error_table[error_index].log_ioasa == 0)
5930 return;
5931 }
5932
fe964d0a 5933 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5934
96d21f00
WB
5935 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5936 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5937 data_len = sizeof(struct ipr_ioasa64);
5938 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5939 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
5940
5941 ipr_err("IOASA Dump:\n");
5942
5943 for (i = 0; i < data_len / 4; i += 4) {
5944 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5945 be32_to_cpu(ioasa_data[i]),
5946 be32_to_cpu(ioasa_data[i+1]),
5947 be32_to_cpu(ioasa_data[i+2]),
5948 be32_to_cpu(ioasa_data[i+3]));
5949 }
5950}
5951
5952/**
5953 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5954 * @ipr_cmd: ipr command struct (sense data is written to
 5955 * ipr_cmd->scsi_cmd->sense_buffer)
5956 *
5957 * Return value:
5958 * none
5959 **/
5960static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5961{
5962 u32 failing_lba;
5963 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5964 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
5965 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5966 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
5967
5968 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5969
5970 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5971 return;
5972
5973 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5974
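	/*
	 * A vset (RAID volume) failing LBA can exceed 32 bits, so use
	 * descriptor-format sense (response code 0x72) with an information
	 * descriptor carrying the 64-bit LBA; everything else gets
	 * fixed-format sense (response code 0x70).
	 */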
5975 if (ipr_is_vset_device(res) &&
5976 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5977 ioasa->u.vset.failing_lba_hi != 0) {
5978 sense_buf[0] = 0x72;
5979 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5980 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5981 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5982
5983 sense_buf[7] = 12;
5984 sense_buf[8] = 0;
5985 sense_buf[9] = 0x0A;
5986 sense_buf[10] = 0x80;
5987
5988 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5989
5990 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5991 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5992 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5993 sense_buf[15] = failing_lba & 0x000000ff;
5994
5995 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5996
5997 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5998 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5999 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6000 sense_buf[19] = failing_lba & 0x000000ff;
6001 } else {
6002 sense_buf[0] = 0x70;
6003 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6004 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6005 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6006
6007 /* Illegal request */
6008 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 6009 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
6010 sense_buf[7] = 10; /* additional length */
6011
6012 /* IOARCB was in error */
6013 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6014 sense_buf[15] = 0xC0;
6015 else /* Parameter data was invalid */
6016 sense_buf[15] = 0x80;
6017
6018 sense_buf[16] =
6019 ((IPR_FIELD_POINTER_MASK &
96d21f00 6020 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
6021 sense_buf[17] =
6022 (IPR_FIELD_POINTER_MASK &
96d21f00 6023 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
6024 } else {
6025 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6026 if (ipr_is_vset_device(res))
6027 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6028 else
6029 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6030
6031 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6032 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6033 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6034 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6035 sense_buf[6] = failing_lba & 0x000000ff;
6036 }
6037
6038 sense_buf[7] = 6; /* additional length */
6039 }
6040 }
6041}
6042
ee0a90fa
BK
6043/**
6044 * ipr_get_autosense - Copy autosense data to sense buffer
6045 * @ipr_cmd: ipr command struct
6046 *
6047 * This function copies the autosense buffer to the buffer
6048 * in the scsi_cmd, if there is autosense available.
6049 *
6050 * Return value:
6051 * 1 if autosense was available / 0 if not
6052 **/
6053static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6054{
96d21f00
WB
6055 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6056 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 6057
96d21f00 6058 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
6059 return 0;
6060
96d21f00
WB
6061 if (ipr_cmd->ioa_cfg->sis64)
6062 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6063 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6064 SCSI_SENSE_BUFFERSIZE));
6065 else
6066 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6067 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6068 SCSI_SENSE_BUFFERSIZE));
ee0a90fa
BK
6069 return 1;
6070}
6071
1da177e4
LT
6072/**
6073 * ipr_erp_start - Process an error response for a SCSI op
6074 * @ioa_cfg: ioa config struct
6075 * @ipr_cmd: ipr command struct
6076 *
6077 * This function determines whether or not to initiate ERP
6078 * on the affected device.
6079 *
6080 * Return value:
6081 * nothing
6082 **/
6083static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6084 struct ipr_cmnd *ipr_cmd)
6085{
6086 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6087 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6088 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 6089 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
6090
6091 if (!res) {
6092 ipr_scsi_eh_done(ipr_cmd);
6093 return;
6094 }
6095
8a048994 6096 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
6097 ipr_gen_sense(ipr_cmd);
6098
cc9bd5d4
BK
6099 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6100
8a048994 6101 switch (masked_ioasc) {
1da177e4 6102 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
6103 if (ipr_is_naca_model(res))
6104 scsi_cmd->result |= (DID_ABORT << 16);
6105 else
6106 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
6107 break;
6108 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 6109 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
6110 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6111 break;
6112 case IPR_IOASC_HW_SEL_TIMEOUT:
6113 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
6114 if (!ipr_is_naca_model(res))
6115 res->needs_sync_complete = 1;
1da177e4
LT
6116 break;
6117 case IPR_IOASC_SYNC_REQUIRED:
6118 if (!res->in_erp)
6119 res->needs_sync_complete = 1;
6120 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6121 break;
6122 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 6123 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
6124 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6125 break;
6126 case IPR_IOASC_BUS_WAS_RESET:
6127 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6128 /*
6129 * Report the bus reset and ask for a retry. The device
6130 * will give CC/UA the next command.
6131 */
6132 if (!res->resetting_device)
6133 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6134 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
6135 if (!ipr_is_naca_model(res))
6136 res->needs_sync_complete = 1;
1da177e4
LT
6137 break;
6138 case IPR_IOASC_HW_DEV_BUS_STATUS:
6139 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6140 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
6141 if (!ipr_get_autosense(ipr_cmd)) {
6142 if (!ipr_is_naca_model(res)) {
6143 ipr_erp_cancel_all(ipr_cmd);
6144 return;
6145 }
6146 }
1da177e4 6147 }
ee0a90fa
BK
6148 if (!ipr_is_naca_model(res))
6149 res->needs_sync_complete = 1;
1da177e4
LT
6150 break;
6151 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6152 break;
6153 default:
5b7304fb
BK
6154 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6155 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6156 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
6157 res->needs_sync_complete = 1;
6158 break;
6159 }
6160
63015bc9 6161 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 6162 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6163 scsi_cmd->scsi_done(scsi_cmd);
6164}
6165
6166/**
6167 * ipr_scsi_done - mid-layer done function
6168 * @ipr_cmd: ipr command struct
6169 *
6170 * This function is invoked by the interrupt handler for
6171 * ops generated by the SCSI mid-layer
6172 *
6173 * Return value:
6174 * none
6175 **/
6176static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6177{
6178 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6179 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 6180 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 6181 unsigned long hrrq_flags;
1da177e4 6182
96d21f00 6183 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
6184
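	/*
	 * On a clean completion, unmap the buffer before taking the hrrq
	 * lock and hand the command straight back; anything with a non-zero
	 * sense key goes to ipr_erp_start() for error recovery.
	 */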
6185 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
172cd6e1
BK
6186 scsi_dma_unmap(scsi_cmd);
6187
56d6aa33 6188 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
05a6538a 6189 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6190 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6191 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 6192 } else {
56d6aa33 6193 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
1da177e4 6194 ipr_erp_start(ioa_cfg, ipr_cmd);
56d6aa33 6195 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 6196 }
1da177e4
LT
6197}
6198
1da177e4
LT
6199/**
6200 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6201 * @shost: scsi host struct
1da177e4 6202 * @scsi_cmd: scsi command struct
1da177e4
LT
6203 *
6204 * This function queues a request generated by the mid-layer.
6205 *
6206 * Return value:
6207 * 0 on success
6208 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6209 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6210 **/
00bfef2c
BK
6211static int ipr_queuecommand(struct Scsi_Host *shost,
6212 struct scsi_cmnd *scsi_cmd)
1da177e4
LT
6213{
6214 struct ipr_ioa_cfg *ioa_cfg;
6215 struct ipr_resource_entry *res;
6216 struct ipr_ioarcb *ioarcb;
6217 struct ipr_cmnd *ipr_cmd;
56d6aa33 6218 unsigned long hrrq_flags, lock_flags;
d12f1576 6219 int rc;
05a6538a 6220 struct ipr_hrr_queue *hrrq;
6221 int hrrq_id;
1da177e4 6222
00bfef2c
BK
6223 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6224
1da177e4 6225 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6226 res = scsi_cmd->device->hostdata;
56d6aa33 6227
6228 if (ipr_is_gata(res) && res->sata_port) {
6229 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6230 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6232 return rc;
6233 }
6234
05a6538a 6235 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6236 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6237
56d6aa33 6238 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4
LT
6239 /*
6240 * We are currently blocking all devices due to a host reset
6241 * We have told the host to stop giving us new requests, but
6242 * ERP ops don't count. FIXME
6243 */
bfae7820 6244 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6245 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6246 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6247 }
1da177e4
LT
6248
6249 /*
6250 * FIXME - Create scsi_set_host_offline interface
6251 * and the ioa_is_dead check can be removed
6252 */
bfae7820 6253 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6254 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6255 goto err_nodev;
1da177e4
LT
6256 }
6257
05a6538a 6258 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6259 if (ipr_cmd == NULL) {
56d6aa33 6260 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6261 return SCSI_MLQUEUE_HOST_BUSY;
6262 }
56d6aa33 6263 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6264
172cd6e1 6265 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6266 ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6267
6268 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6269 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6270 ipr_cmd->done = ipr_scsi_eh_done;
1da177e4
LT
6271
6272 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6273 if (scsi_cmd->underflow == 0)
6274 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6275
1da177e4 6276 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
0b1f8d44
WX
6277 if (ipr_is_gscsi(res) && res->reset_occurred) {
6278 res->reset_occurred = 0;
ab6c10b1 6279 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6280 }
1da177e4 6281 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
50668633
CH
6282 if (scsi_cmd->flags & SCMD_TAGGED)
6283 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6284 else
6285 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
1da177e4
LT
6286 }
6287
6288 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6289 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6290 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6291 }
1da177e4 6292
d12f1576
DC
6293 if (ioa_cfg->sis64)
6294 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6295 else
6296 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6297
56d6aa33 6298 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6299 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6300 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6301 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6302 if (!rc)
6303 scsi_dma_unmap(scsi_cmd);
a5fb407e 6304 return SCSI_MLQUEUE_HOST_BUSY;
1da177e4
LT
6305 }
6306
56d6aa33 6307 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6308 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6309 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6310 scsi_dma_unmap(scsi_cmd);
6311 goto err_nodev;
6312 }
6313
6314 ioarcb->res_handle = res->res_handle;
6315 if (res->needs_sync_complete) {
6316 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6317 res->needs_sync_complete = 0;
6318 }
05a6538a 6319 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6320 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6321 ipr_send_command(ipr_cmd);
56d6aa33 6322 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6323 return 0;
1da177e4 6324
00bfef2c 6325err_nodev:
56d6aa33 6326 spin_lock_irqsave(hrrq->lock, hrrq_flags);
00bfef2c
BK
6327 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6328 scsi_cmd->result = (DID_NO_CONNECT << 16);
6329 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6330 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6331 return 0;
6332}
f281233d 6333
35a39691
BK
6334/**
6335 * ipr_ioctl - IOCTL handler
6336 * @sdev: scsi device struct
6337 * @cmd: IOCTL cmd
6338 * @arg: IOCTL arg
6339 *
6340 * Return value:
6341 * 0 on success / other on failure
6342 **/
bd705f2d 6343static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
6344{
6345 struct ipr_resource_entry *res;
6346
6347 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
6348 if (res && ipr_is_gata(res)) {
6349 if (cmd == HDIO_GET_IDENTITY)
6350 return -ENOTTY;
94be9a58 6351 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6352 }
35a39691
BK
6353
6354 return -EINVAL;
6355}
6356
1da177e4
LT
6357/**
6358 * ipr_ioa_info - Get information about the card/driver
 6359 * @host: scsi host struct
6360 *
6361 * Return value:
6362 * pointer to buffer with description string
6363 **/
203fa3fe 6364static const char *ipr_ioa_info(struct Scsi_Host *host)
1da177e4
LT
6365{
6366 static char buffer[512];
6367 struct ipr_ioa_cfg *ioa_cfg;
6368 unsigned long lock_flags = 0;
6369
6370 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6371
6372 spin_lock_irqsave(host->host_lock, lock_flags);
6373 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6374 spin_unlock_irqrestore(host->host_lock, lock_flags);
6375
6376 return buffer;
6377}
6378
6379static struct scsi_host_template driver_template = {
6380 .module = THIS_MODULE,
6381 .name = "IPR",
6382 .info = ipr_ioa_info,
35a39691 6383 .ioctl = ipr_ioctl,
1da177e4
LT
6384 .queuecommand = ipr_queuecommand,
6385 .eh_abort_handler = ipr_eh_abort,
6386 .eh_device_reset_handler = ipr_eh_dev_reset,
6387 .eh_host_reset_handler = ipr_eh_host_reset,
6388 .slave_alloc = ipr_slave_alloc,
6389 .slave_configure = ipr_slave_configure,
6390 .slave_destroy = ipr_slave_destroy,
f688f96d 6391 .scan_finished = ipr_scan_finished,
35a39691
BK
6392 .target_alloc = ipr_target_alloc,
6393 .target_destroy = ipr_target_destroy,
1da177e4 6394 .change_queue_depth = ipr_change_queue_depth,
1da177e4
LT
6395 .bios_param = ipr_biosparam,
6396 .can_queue = IPR_MAX_COMMANDS,
6397 .this_id = -1,
6398 .sg_tablesize = IPR_MAX_SGLIST,
6399 .max_sectors = IPR_IOA_MAX_SECTORS,
6400 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6401 .use_clustering = ENABLE_CLUSTERING,
6402 .shost_attrs = ipr_ioa_attrs,
6403 .sdev_attrs = ipr_dev_attrs,
54b2b50c
MP
6404 .proc_name = IPR_NAME,
6405 .no_write_same = 1,
2ecb204d 6406 .use_blk_tags = 1,
1da177e4
LT
6407};
6408
35a39691
BK
6409/**
6410 * ipr_ata_phy_reset - libata phy_reset handler
6411 * @ap: ata port to reset
6412 *
6413 **/
6414static void ipr_ata_phy_reset(struct ata_port *ap)
6415{
6416 unsigned long flags;
6417 struct ipr_sata_port *sata_port = ap->private_data;
6418 struct ipr_resource_entry *res = sata_port->res;
6419 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6420 int rc;
6421
6422 ENTER;
6423 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6424 while (ioa_cfg->in_reset_reload) {
35a39691
BK
6425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6426 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6427 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6428 }
6429
56d6aa33 6430 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
35a39691
BK
6431 goto out_unlock;
6432
6433 rc = ipr_device_reset(ioa_cfg, res);
6434
6435 if (rc) {
3e4ec344 6436 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6437 goto out_unlock;
6438 }
6439
3e7ebdfa
WB
6440 ap->link.device[0].class = res->ata_class;
6441 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6442 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6443
6444out_unlock:
6445 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6446 LEAVE;
6447}
6448
6449/**
6450 * ipr_ata_post_internal - Cleanup after an internal command
6451 * @qc: ATA queued command
6452 *
6453 * Return value:
6454 * none
6455 **/
6456static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6457{
6458 struct ipr_sata_port *sata_port = qc->ap->private_data;
6459 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6460 struct ipr_cmnd *ipr_cmd;
05a6538a 6461 struct ipr_hrr_queue *hrrq;
35a39691
BK
6462 unsigned long flags;
6463
6464 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6465 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
6466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6467 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6468 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6469 }
6470
05a6538a 6471 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6472 spin_lock(&hrrq->_lock);
05a6538a 6473 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6474 if (ipr_cmd->qc == qc) {
6475 ipr_device_reset(ioa_cfg, sata_port->res);
6476 break;
6477 }
35a39691 6478 }
56d6aa33 6479 spin_unlock(&hrrq->_lock);
35a39691
BK
6480 }
6481 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6482}
6483
35a39691
BK
6484/**
6485 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6486 * @regs: destination
6487 * @tf: source ATA taskfile
6488 *
6489 * Return value:
6490 * none
6491 **/
6492static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6493 struct ata_taskfile *tf)
6494{
6495 regs->feature = tf->feature;
6496 regs->nsect = tf->nsect;
6497 regs->lbal = tf->lbal;
6498 regs->lbam = tf->lbam;
6499 regs->lbah = tf->lbah;
6500 regs->device = tf->device;
6501 regs->command = tf->command;
6502 regs->hob_feature = tf->hob_feature;
6503 regs->hob_nsect = tf->hob_nsect;
6504 regs->hob_lbal = tf->hob_lbal;
6505 regs->hob_lbam = tf->hob_lbam;
6506 regs->hob_lbah = tf->hob_lbah;
6507 regs->ctl = tf->ctl;
6508}
6509
6510/**
6511 * ipr_sata_done - done function for SATA commands
6512 * @ipr_cmd: ipr command struct
6513 *
6514 * This function is invoked by the interrupt handler for
6515 * ops generated by the SCSI mid-layer to SATA devices
6516 *
6517 * Return value:
6518 * none
6519 **/
6520static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6521{
6522 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6523 struct ata_queued_cmd *qc = ipr_cmd->qc;
6524 struct ipr_sata_port *sata_port = qc->ap->private_data;
6525 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6526 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6527
56d6aa33 6528 spin_lock(&ipr_cmd->hrrq->_lock);
96d21f00
WB
6529 if (ipr_cmd->ioa_cfg->sis64)
6530 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6531 sizeof(struct ipr_ioasa_gata));
6532 else
6533 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6534 sizeof(struct ipr_ioasa_gata));
35a39691
BK
6535 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6536
96d21f00 6537 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6538 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
6539
6540 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6541 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6542 else
96d21f00 6543 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6544 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6545 spin_unlock(&ipr_cmd->hrrq->_lock);
35a39691
BK
6546 ata_qc_complete(qc);
6547}
6548
a32c055f
WB
6549/**
6550 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6551 * @ipr_cmd: ipr command struct
6552 * @qc: ATA queued command
6553 *
6554 **/
6555static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6556 struct ata_queued_cmd *qc)
6557{
6558 u32 ioadl_flags = 0;
6559 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6560 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
a32c055f
WB
6561 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6562 int len = qc->nbytes;
6563 struct scatterlist *sg;
6564 unsigned int si;
6565 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6566
6567 if (len == 0)
6568 return;
6569
6570 if (qc->dma_dir == DMA_TO_DEVICE) {
6571 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6572 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6573 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6574 ioadl_flags = IPR_IOADL_FLAGS_READ;
6575
6576 ioarcb->data_transfer_length = cpu_to_be32(len);
6577 ioarcb->ioadl_len =
6578 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6579 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6580 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
a32c055f
WB
6581
6582 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6583 ioadl64->flags = cpu_to_be32(ioadl_flags);
6584 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6585 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6586
6587 last_ioadl64 = ioadl64;
6588 ioadl64++;
6589 }
6590
6591 if (likely(last_ioadl64))
6592 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6593}
6594
35a39691
BK
6595/**
6596 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6597 * @ipr_cmd: ipr command struct
6598 * @qc: ATA queued command
6599 *
6600 **/
6601static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6602 struct ata_queued_cmd *qc)
6603{
6604 u32 ioadl_flags = 0;
6605 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6606 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6607 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6608 int len = qc->nbytes;
35a39691 6609 struct scatterlist *sg;
ff2aeb1e 6610 unsigned int si;
35a39691
BK
6611
6612 if (len == 0)
6613 return;
6614
6615 if (qc->dma_dir == DMA_TO_DEVICE) {
6616 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6617 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6618 ioarcb->data_transfer_length = cpu_to_be32(len);
6619 ioarcb->ioadl_len =
35a39691
BK
6620 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6621 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6622 ioadl_flags = IPR_IOADL_FLAGS_READ;
6623 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6624 ioarcb->read_ioadl_len =
6625 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6626 }
6627
ff2aeb1e 6628 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
6629 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6630 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
6631
6632 last_ioadl = ioadl;
6633 ioadl++;
35a39691 6634 }
3be6cbd7
JG
6635
6636 if (likely(last_ioadl))
6637 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
6638}
6639
56d6aa33 6640/**
6641 * ipr_qc_defer - Get a free ipr_cmd
6642 * @qc: queued command
6643 *
6644 * Return value:
6645 * 0 on success / ATA_DEFER_LINK if no command block is available
6646 **/
6647static int ipr_qc_defer(struct ata_queued_cmd *qc)
6648{
6649 struct ata_port *ap = qc->ap;
6650 struct ipr_sata_port *sata_port = ap->private_data;
6651 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6652 struct ipr_cmnd *ipr_cmd;
6653 struct ipr_hrr_queue *hrrq;
6654 int hrrq_id;
6655
6656 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6657 hrrq = &ioa_cfg->hrrq[hrrq_id];
6658
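	/*
	 * Reserve a command block now and stash it in qc->lldd_task so
	 * ipr_qc_issue() does not have to allocate one (and possibly fail)
	 * after libata has committed to issuing the qc.
	 */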
6659 qc->lldd_task = NULL;
6660 spin_lock(&hrrq->_lock);
6661 if (unlikely(hrrq->ioa_is_dead)) {
6662 spin_unlock(&hrrq->_lock);
6663 return 0;
6664 }
6665
6666 if (unlikely(!hrrq->allow_cmds)) {
6667 spin_unlock(&hrrq->_lock);
6668 return ATA_DEFER_LINK;
6669 }
6670
6671 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6672 if (ipr_cmd == NULL) {
6673 spin_unlock(&hrrq->_lock);
6674 return ATA_DEFER_LINK;
6675 }
6676
6677 qc->lldd_task = ipr_cmd;
6678 spin_unlock(&hrrq->_lock);
6679 return 0;
6680}
6681
35a39691
BK
6682/**
6683 * ipr_qc_issue - Issue a SATA qc to a device
6684 * @qc: queued command
6685 *
6686 * Return value:
 6687 * 0 if success / AC_ERR_* if the command could not be issued
6688 **/
6689static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6690{
6691 struct ata_port *ap = qc->ap;
6692 struct ipr_sata_port *sata_port = ap->private_data;
6693 struct ipr_resource_entry *res = sata_port->res;
6694 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6695 struct ipr_cmnd *ipr_cmd;
6696 struct ipr_ioarcb *ioarcb;
6697 struct ipr_ioarcb_ata_regs *regs;
6698
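 /* ipr_qc_defer() normally reserves an ipr_cmd and stashes it in qc->lldd_task;
  * if that has not happened yet, try to reserve one here before giving up. */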
56d6aa33 6699 if (qc->lldd_task == NULL)
6700 ipr_qc_defer(qc);
6701
6702 ipr_cmd = qc->lldd_task;
6703 if (ipr_cmd == NULL)
0feeed82 6704 return AC_ERR_SYSTEM;
35a39691 6705
56d6aa33 6706 qc->lldd_task = NULL;
6707 spin_lock(&ipr_cmd->hrrq->_lock);
6708 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6709 ipr_cmd->hrrq->ioa_is_dead)) {
6710 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6711 spin_unlock(&ipr_cmd->hrrq->_lock);
6712 return AC_ERR_SYSTEM;
6713 }
6714
05a6538a 6715 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 6716 ioarcb = &ipr_cmd->ioarcb;
35a39691 6717
a32c055f
WB
6718 if (ioa_cfg->sis64) {
6719 regs = &ipr_cmd->i.ata_ioadl.regs;
6720 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6721 } else
6722 regs = &ioarcb->u.add_data.u.regs;
6723
6724 memset(regs, 0, sizeof(*regs));
6725 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 6726
56d6aa33 6727 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
6728 ipr_cmd->qc = qc;
6729 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6730 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6731 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6732 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6733 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6734 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6735
a32c055f
WB
6736 if (ioa_cfg->sis64)
6737 ipr_build_ata_ioadl64(ipr_cmd, qc);
6738 else
6739 ipr_build_ata_ioadl(ipr_cmd, qc);
6740
35a39691
BK
6741 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6742 ipr_copy_sata_tf(regs, &qc->tf);
6743 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6744 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6745
6746 switch (qc->tf.protocol) {
6747 case ATA_PROT_NODATA:
6748 case ATA_PROT_PIO:
6749 break;
6750
6751 case ATA_PROT_DMA:
6752 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6753 break;
6754
0dc36888
TH
6755 case ATAPI_PROT_PIO:
6756 case ATAPI_PROT_NODATA:
35a39691
BK
6757 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6758 break;
6759
0dc36888 6760 case ATAPI_PROT_DMA:
35a39691
BK
6761 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6762 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6763 break;
6764
6765 default:
6766 WARN_ON(1);
56d6aa33 6767 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 6768 return AC_ERR_INVALID;
35a39691
BK
6769 }
6770
a32c055f 6771 ipr_send_command(ipr_cmd);
56d6aa33 6772 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 6773
35a39691
BK
6774 return 0;
6775}
6776
4c9bf4e7
TH
6777/**
6778 * ipr_qc_fill_rtf - Read result TF
6779 * @qc: ATA queued command
6780 *
6781 * Return value:
6782 * true
6783 **/
6784static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6785{
6786 struct ipr_sata_port *sata_port = qc->ap->private_data;
6787 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6788 struct ata_taskfile *tf = &qc->result_tf;
6789
6790 tf->feature = g->error;
6791 tf->nsect = g->nsect;
6792 tf->lbal = g->lbal;
6793 tf->lbam = g->lbam;
6794 tf->lbah = g->lbah;
6795 tf->device = g->device;
6796 tf->command = g->status;
6797 tf->hob_nsect = g->hob_nsect;
6798 tf->hob_lbal = g->hob_lbal;
6799 tf->hob_lbam = g->hob_lbam;
6800 tf->hob_lbah = g->hob_lbah;
4c9bf4e7
TH
6801
6802 return true;
6803}
6804
35a39691 6805static struct ata_port_operations ipr_sata_ops = {
35a39691 6806 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6807 .hardreset = ipr_sata_reset,
35a39691 6808 .post_internal_cmd = ipr_ata_post_internal,
35a39691 6809 .qc_prep = ata_noop_qc_prep,
56d6aa33 6810 .qc_defer = ipr_qc_defer,
35a39691 6811 .qc_issue = ipr_qc_issue,
4c9bf4e7 6812 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6813 .port_start = ata_sas_port_start,
6814 .port_stop = ata_sas_port_stop
6815};
6816
6817static struct ata_port_info sata_port_info = {
9cbe056f 6818 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
0f2e0330
SS
6819 .pio_mask = ATA_PIO4_ONLY,
6820 .mwdma_mask = ATA_MWDMA2,
6821 .udma_mask = ATA_UDMA6,
35a39691
BK
6822 .port_ops = &ipr_sata_ops
6823};
6824
1da177e4
LT
6825#ifdef CONFIG_PPC_PSERIES
6826static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
6827 PVR_NORTHSTAR,
6828 PVR_PULSAR,
6829 PVR_POWER4,
6830 PVR_ICESTAR,
6831 PVR_SSTAR,
6832 PVR_POWER4p,
6833 PVR_630,
6834 PVR_630p
1da177e4
LT
6835};
6836
6837/**
6838 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6839 * @ioa_cfg: ioa cfg struct
6840 *
6841 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6842 * certain pSeries hardware. This function determines if the given
 6843 * adapter is in one of these configurations or not.
6844 *
6845 * Return value:
6846 * 1 if adapter is not supported / 0 if adapter is supported
6847 **/
6848static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6849{
1da177e4
LT
6850 int i;
6851
44c10138 6852 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6853 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6854 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6855 return 1;
1da177e4
LT
6856 }
6857 }
6858 return 0;
6859}
6860#else
6861#define ipr_invalid_adapter(ioa_cfg) 0
6862#endif
6863
6864/**
6865 * ipr_ioa_bringdown_done - IOA bring down completion.
6866 * @ipr_cmd: ipr command struct
6867 *
6868 * This function processes the completion of an adapter bring down.
6869 * It wakes any reset sleepers.
6870 *
6871 * Return value:
6872 * IPR_RC_JOB_RETURN
6873 **/
6874static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6875{
6876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 6877 int i;
1da177e4
LT
6878
6879 ENTER;
bfae7820
BK
6880 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6881 ipr_trace;
6882 spin_unlock_irq(ioa_cfg->host->host_lock);
6883 scsi_unblock_requests(ioa_cfg->host);
6884 spin_lock_irq(ioa_cfg->host->host_lock);
6885 }
6886
1da177e4
LT
6887 ioa_cfg->in_reset_reload = 0;
6888 ioa_cfg->reset_retries = 0;
96b04db9 6889 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6890 spin_lock(&ioa_cfg->hrrq[i]._lock);
6891 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6892 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6893 }
6894 wmb();
6895
05a6538a 6896 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6897 wake_up_all(&ioa_cfg->reset_wait_q);
1da177e4
LT
6898 LEAVE;
6899
6900 return IPR_RC_JOB_RETURN;
6901}
6902
6903/**
6904 * ipr_ioa_reset_done - IOA reset completion.
6905 * @ipr_cmd: ipr command struct
6906 *
6907 * This function processes the completion of an adapter reset.
6908 * It schedules any necessary mid-layer add/removes and
6909 * wakes any reset sleepers.
6910 *
6911 * Return value:
6912 * IPR_RC_JOB_RETURN
6913 **/
6914static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6915{
6916 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6917 struct ipr_resource_entry *res;
6918 struct ipr_hostrcb *hostrcb, *temp;
56d6aa33 6919 int i = 0, j;
1da177e4
LT
6920
6921 ENTER;
6922 ioa_cfg->in_reset_reload = 0;
56d6aa33 6923 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6924 spin_lock(&ioa_cfg->hrrq[j]._lock);
6925 ioa_cfg->hrrq[j].allow_cmds = 1;
6926 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6927 }
6928 wmb();
1da177e4 6929 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6930 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6931
6932 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 6933 if (res->add_to_ml || res->del_from_ml) {
1da177e4
LT
6934 ipr_trace;
6935 break;
6936 }
6937 }
6938 schedule_work(&ioa_cfg->work_q);
6939
6940 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6941 list_del(&hostrcb->queue);
6942 if (i++ < IPR_NUM_LOG_HCAMS)
6943 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6944 else
6945 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6946 }
6947
6bb04170 6948 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6949 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6950
6951 ioa_cfg->reset_retries = 0;
05a6538a 6952 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6953 wake_up_all(&ioa_cfg->reset_wait_q);
6954
30237853 6955 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6956 scsi_unblock_requests(ioa_cfg->host);
30237853 6957 spin_lock(ioa_cfg->host->host_lock);
1da177e4 6958
56d6aa33 6959 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
1da177e4
LT
6960 scsi_block_requests(ioa_cfg->host);
6961
f688f96d 6962 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
6963 LEAVE;
6964 return IPR_RC_JOB_RETURN;
6965}
6966
6967/**
6968 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6969 * @supported_dev: supported device struct
6970 * @vpids: vendor product id struct
6971 *
6972 * Return value:
6973 * none
6974 **/
6975static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6976 struct ipr_std_inq_vpids *vpids)
6977{
6978 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6979 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6980 supported_dev->num_records = 1;
6981 supported_dev->data_length =
6982 cpu_to_be16(sizeof(struct ipr_supported_device));
6983 supported_dev->reserved = 0;
6984}
6985
6986/**
6987 * ipr_set_supported_devs - Send Set Supported Devices for a device
6988 * @ipr_cmd: ipr command struct
6989 *
a32c055f 6990 * This function sends a Set Supported Devices command to the adapter.
1da177e4
LT
6991 *
6992 * Return value:
6993 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6994 **/
6995static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6996{
6997 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6998 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7000 struct ipr_resource_entry *res = ipr_cmd->u.res;
7001
7002 ipr_cmd->job_step = ipr_ioa_reset_done;
7003
7004 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 7005 if (!ipr_is_scsi_disk(res))
1da177e4
LT
7006 continue;
7007
7008 ipr_cmd->u.res = res;
3e7ebdfa 7009 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
7010
7011 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7012 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7013 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7014
7015 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 7016 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
7017 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7018 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7019
a32c055f
WB
7020 ipr_init_ioadl(ipr_cmd,
7021 ioa_cfg->vpd_cbs_dma +
7022 offsetof(struct ipr_misc_cbs, supp_dev),
7023 sizeof(struct ipr_supported_device),
7024 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7025
7026 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7027 IPR_SET_SUP_DEVICE_TIMEOUT);
7028
3e7ebdfa
WB
7029 if (!ioa_cfg->sis64)
7030 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 7031 LEAVE;
1da177e4
LT
7032 return IPR_RC_JOB_RETURN;
7033 }
7034
05a6538a 7035 LEAVE;
1da177e4
LT
7036 return IPR_RC_JOB_CONTINUE;
7037}
7038
7039/**
7040 * ipr_get_mode_page - Locate specified mode page
7041 * @mode_pages: mode page buffer
7042 * @page_code: page code to find
7043 * @len: minimum required length for mode page
7044 *
7045 * Return value:
7046 * pointer to mode page / NULL on failure
7047 **/
7048static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7049 u32 page_code, u32 len)
7050{
7051 struct ipr_mode_page_hdr *mode_hdr;
7052 u32 page_length;
7053 u32 length;
7054
7055 if (!mode_pages || (mode_pages->hdr.length == 0))
7056 return NULL;
7057
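 /* hdr.length is the SCSI mode data length, which excludes the length byte
  * itself; adding 1 gives the full buffer size, and subtracting the 4-byte
  * mode parameter header and any block descriptors leaves the number of
  * bytes of mode pages to walk below. */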
7058 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7059 mode_hdr = (struct ipr_mode_page_hdr *)
7060 (mode_pages->data + mode_pages->hdr.block_desc_len);
7061
7062 while (length) {
7063 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7064 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7065 return mode_hdr;
7066 break;
7067 } else {
7068 page_length = (sizeof(struct ipr_mode_page_hdr) +
7069 mode_hdr->page_length);
7070 length -= page_length;
7071 mode_hdr = (struct ipr_mode_page_hdr *)
7072 ((unsigned long)mode_hdr + page_length);
7073 }
7074 }
7075 return NULL;
7076}
7077
7078/**
7079 * ipr_check_term_power - Check for term power errors
7080 * @ioa_cfg: ioa config struct
7081 * @mode_pages: IOAFP mode pages buffer
7082 *
7083 * Check the IOAFP's mode page 28 for term power errors
7084 *
7085 * Return value:
7086 * nothing
7087 **/
7088static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7089 struct ipr_mode_pages *mode_pages)
7090{
7091 int i;
7092 int entry_length;
7093 struct ipr_dev_bus_entry *bus;
7094 struct ipr_mode_page28 *mode_page;
7095
7096 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7097 sizeof(struct ipr_mode_page28));
7098
7099 entry_length = mode_page->entry_length;
7100
7101 bus = mode_page->bus;
7102
7103 for (i = 0; i < mode_page->num_entries; i++) {
7104 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7105 dev_err(&ioa_cfg->pdev->dev,
7106 "Term power is absent on scsi bus %d\n",
7107 bus->res_addr.bus);
7108 }
7109
7110 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7111 }
7112}
7113
7114/**
7115 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7116 * @ioa_cfg: ioa config struct
7117 *
7118 * Looks through the config table checking for SES devices. If
7119 * the SES device is in the SES table indicating a maximum SCSI
7120 * bus speed, the speed is limited for the bus.
7121 *
7122 * Return value:
7123 * none
7124 **/
7125static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7126{
7127 u32 max_xfer_rate;
7128 int i;
7129
7130 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7131 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7132 ioa_cfg->bus_attr[i].bus_width);
7133
7134 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7135 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7136 }
7137}
7138
7139/**
7140 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7141 * @ioa_cfg: ioa config struct
7142 * @mode_pages: mode page 28 buffer
7143 *
7144 * Updates mode page 28 based on driver configuration
7145 *
7146 * Return value:
7147 * none
7148 **/
7149static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7150 struct ipr_mode_pages *mode_pages)
1da177e4
LT
7151{
7152 int i, entry_length;
7153 struct ipr_dev_bus_entry *bus;
7154 struct ipr_bus_attributes *bus_attr;
7155 struct ipr_mode_page28 *mode_page;
7156
7157 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7158 sizeof(struct ipr_mode_page28));
7159
7160 entry_length = mode_page->entry_length;
7161
7162 /* Loop for each device bus entry */
7163 for (i = 0, bus = mode_page->bus;
7164 i < mode_page->num_entries;
7165 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7166 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7167 dev_err(&ioa_cfg->pdev->dev,
7168 "Invalid resource address reported: 0x%08X\n",
7169 IPR_GET_PHYS_LOC(bus->res_addr));
7170 continue;
7171 }
7172
7173 bus_attr = &ioa_cfg->bus_attr[i];
7174 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7175 bus->bus_width = bus_attr->bus_width;
7176 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7177 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7178 if (bus_attr->qas_enabled)
7179 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7180 else
7181 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7182 }
7183}
7184
7185/**
7186 * ipr_build_mode_select - Build a mode select command
7187 * @ipr_cmd: ipr command struct
7188 * @res_handle: resource handle to send command to
 7189 * @parm: Byte 1 of the Mode Select CDB
7190 * @dma_addr: DMA buffer address
7191 * @xfer_len: data transfer length
7192 *
7193 * Return value:
7194 * none
7195 **/
7196static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
7197 __be32 res_handle, u8 parm,
7198 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7199{
1da177e4
LT
7200 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7201
7202 ioarcb->res_handle = res_handle;
7203 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7204 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7205 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7206 ioarcb->cmd_pkt.cdb[1] = parm;
7207 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7208
a32c055f 7209 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7210}
7211
7212/**
7213 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7214 * @ipr_cmd: ipr command struct
7215 *
7216 * This function sets up the SCSI bus attributes and sends
7217 * a Mode Select for Page 28 to activate them.
7218 *
7219 * Return value:
7220 * IPR_RC_JOB_RETURN
7221 **/
7222static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7223{
7224 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7225 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7226 int length;
7227
7228 ENTER;
4733804c
BK
7229 ipr_scsi_bus_speed_limit(ioa_cfg);
7230 ipr_check_term_power(ioa_cfg, mode_pages);
7231 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
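 /* Save the total number of bytes to send back with MODE SELECT, then clear
  * the mode data length field, which is reserved (must be zero) when the
  * buffer is written back to the adapter. */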
7232 length = mode_pages->hdr.length + 1;
7233 mode_pages->hdr.length = 0;
1da177e4
LT
7234
7235 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7236 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7237 length);
7238
f72919ec
WB
7239 ipr_cmd->job_step = ipr_set_supported_devs;
7240 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7241 struct ipr_resource_entry, queue);
1da177e4
LT
7242 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7243
7244 LEAVE;
7245 return IPR_RC_JOB_RETURN;
7246}
7247
7248/**
7249 * ipr_build_mode_sense - Builds a mode sense command
7250 * @ipr_cmd: ipr command struct
 7251 * @res_handle: resource handle to send command to
7252 * @parm: Byte 2 of mode sense command
7253 * @dma_addr: DMA address of mode sense buffer
7254 * @xfer_len: Size of DMA buffer
7255 *
7256 * Return value:
7257 * none
7258 **/
7259static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7260 __be32 res_handle,
a32c055f 7261 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7262{
1da177e4
LT
7263 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7264
7265 ioarcb->res_handle = res_handle;
7266 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7267 ioarcb->cmd_pkt.cdb[2] = parm;
7268 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7269 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7270
a32c055f 7271 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7272}
7273
dfed823e
BK
7274/**
7275 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7276 * @ipr_cmd: ipr command struct
7277 *
7278 * This function handles the failure of an IOA bringup command.
7279 *
7280 * Return value:
7281 * IPR_RC_JOB_RETURN
7282 **/
7283static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7284{
7285 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7286 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7287
7288 dev_err(&ioa_cfg->pdev->dev,
7289 "0x%02X failed with IOASC: 0x%08X\n",
7290 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7291
7292 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7293 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e
BK
7294 return IPR_RC_JOB_RETURN;
7295}
7296
7297/**
7298 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7299 * @ipr_cmd: ipr command struct
7300 *
7301 * This function handles the failure of a Mode Sense to the IOAFP.
7302 * Some adapters do not handle all mode pages.
7303 *
7304 * Return value:
7305 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7306 **/
7307static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7308{
f72919ec 7309 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7310 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7311
7312 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7313 ipr_cmd->job_step = ipr_set_supported_devs;
7314 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7315 struct ipr_resource_entry, queue);
dfed823e
BK
7316 return IPR_RC_JOB_CONTINUE;
7317 }
7318
7319 return ipr_reset_cmd_failed(ipr_cmd);
7320}
7321
1da177e4
LT
7322/**
7323 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7324 * @ipr_cmd: ipr command struct
7325 *
 7326 * This function sends a Page 28 mode sense to the IOA to
7327 * retrieve SCSI bus attributes.
7328 *
7329 * Return value:
7330 * IPR_RC_JOB_RETURN
7331 **/
7332static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7333{
7334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7335
7336 ENTER;
7337 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7338 0x28, ioa_cfg->vpd_cbs_dma +
7339 offsetof(struct ipr_misc_cbs, mode_pages),
7340 sizeof(struct ipr_mode_pages));
7341
7342 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7343 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7344
7345 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7346
7347 LEAVE;
7348 return IPR_RC_JOB_RETURN;
7349}
7350
ac09c349
BK
7351/**
7352 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7353 * @ipr_cmd: ipr command struct
7354 *
7355 * This function enables dual IOA RAID support if possible.
7356 *
7357 * Return value:
7358 * IPR_RC_JOB_RETURN
7359 **/
7360static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7361{
7362 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7363 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7364 struct ipr_mode_page24 *mode_page;
7365 int length;
7366
7367 ENTER;
7368 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7369 sizeof(struct ipr_mode_page24));
7370
7371 if (mode_page)
7372 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7373
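 /* As for page 28: remember the transfer length, then zero the mode data
  * length field before sending the buffer back with MODE SELECT. */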
7374 length = mode_pages->hdr.length + 1;
7375 mode_pages->hdr.length = 0;
7376
7377 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7378 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7379 length);
7380
7381 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7382 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7383
7384 LEAVE;
7385 return IPR_RC_JOB_RETURN;
7386}
7387
7388/**
7389 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7390 * @ipr_cmd: ipr command struct
7391 *
7392 * This function handles the failure of a Mode Sense to the IOAFP.
7393 * Some adapters do not handle all mode pages.
7394 *
7395 * Return value:
7396 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7397 **/
7398static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7399{
96d21f00 7400 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7401
7402 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7403 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7404 return IPR_RC_JOB_CONTINUE;
7405 }
7406
7407 return ipr_reset_cmd_failed(ipr_cmd);
7408}
7409
7410/**
7411 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7412 * @ipr_cmd: ipr command struct
7413 *
 7414 * This function sends a mode sense to the IOA to retrieve
7415 * the IOA Advanced Function Control mode page.
7416 *
7417 * Return value:
7418 * IPR_RC_JOB_RETURN
7419 **/
7420static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7421{
7422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7423
7424 ENTER;
7425 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7426 0x24, ioa_cfg->vpd_cbs_dma +
7427 offsetof(struct ipr_misc_cbs, mode_pages),
7428 sizeof(struct ipr_mode_pages));
7429
7430 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7431 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7432
7433 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7434
7435 LEAVE;
7436 return IPR_RC_JOB_RETURN;
7437}
7438
1da177e4
LT
7439/**
7440 * ipr_init_res_table - Initialize the resource table
7441 * @ipr_cmd: ipr command struct
7442 *
7443 * This function looks through the existing resource table, comparing
7444 * it with the config table. This function will take care of old/new
7445 * devices and schedule adding/removing them from the mid-layer
7446 * as appropriate.
7447 *
7448 * Return value:
7449 * IPR_RC_JOB_CONTINUE
7450 **/
7451static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7452{
7453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7454 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7455 struct ipr_config_table_entry_wrapper cfgtew;
7456 int entries, found, flag, i;
1da177e4
LT
7457 LIST_HEAD(old_res);
7458
7459 ENTER;
3e7ebdfa
WB
7460 if (ioa_cfg->sis64)
7461 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7462 else
7463 flag = ioa_cfg->u.cfg_table->hdr.flags;
7464
7465 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
7466 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7467
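 /* Park every currently known resource on old_res; entries that are not
  * matched against the new config table below are devices that have gone
  * away and will be deleted from the mid-layer or returned to the free list. */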
7468 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7469 list_move_tail(&res->queue, &old_res);
7470
3e7ebdfa 7471 if (ioa_cfg->sis64)
438b0331 7472 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7473 else
7474 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7475
7476 for (i = 0; i < entries; i++) {
7477 if (ioa_cfg->sis64)
7478 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7479 else
7480 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7481 found = 0;
7482
7483 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7484 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7485 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7486 found = 1;
7487 break;
7488 }
7489 }
7490
7491 if (!found) {
7492 if (list_empty(&ioa_cfg->free_res_q)) {
7493 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7494 break;
7495 }
7496
7497 found = 1;
7498 res = list_entry(ioa_cfg->free_res_q.next,
7499 struct ipr_resource_entry, queue);
7500 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7501 ipr_init_res_entry(res, &cfgtew);
1da177e4 7502 res->add_to_ml = 1;
56115598
WB
7503 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7504 res->sdev->allow_restart = 1;
1da177e4
LT
7505
7506 if (found)
3e7ebdfa 7507 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7508 }
7509
7510 list_for_each_entry_safe(res, temp, &old_res, queue) {
7511 if (res->sdev) {
7512 res->del_from_ml = 1;
3e7ebdfa 7513 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7514 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7515 }
7516 }
7517
3e7ebdfa
WB
7518 list_for_each_entry_safe(res, temp, &old_res, queue) {
7519 ipr_clear_res_target(res);
7520 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7521 }
7522
ac09c349
BK
7523 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7524 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7525 else
7526 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7527
7528 LEAVE;
7529 return IPR_RC_JOB_CONTINUE;
7530}
7531
7532/**
7533 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7534 * @ipr_cmd: ipr command struct
7535 *
7536 * This function sends a Query IOA Configuration command
7537 * to the adapter to retrieve the IOA configuration table.
7538 *
7539 * Return value:
7540 * IPR_RC_JOB_RETURN
7541 **/
7542static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7543{
7544 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7545 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7546 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7547 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7548
7549 ENTER;
ac09c349
BK
7550 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7551 ioa_cfg->dual_raid = 1;
1da177e4
LT
7552 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7553 ucode_vpd->major_release, ucode_vpd->card_type,
7554 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7555 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7556 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7557
7558 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
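 /* CDB bytes 6-8 carry the allocation length (the config table size),
  * most significant byte first. */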
438b0331 7559 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7560 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7561 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7562
3e7ebdfa 7563 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7564 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7565
7566 ipr_cmd->job_step = ipr_init_res_table;
7567
7568 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7569
7570 LEAVE;
7571 return IPR_RC_JOB_RETURN;
7572}
7573
7574/**
7575 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 7576 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (placed in CDB byte 1)
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
7577 *
7578 * This utility function sends an inquiry to the adapter.
7579 *
7580 * Return value:
7581 * none
7582 **/
7583static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7584 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
7585{
7586 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
7587
7588 ENTER;
7589 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7590 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7591
7592 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7593 ioarcb->cmd_pkt.cdb[1] = flags;
7594 ioarcb->cmd_pkt.cdb[2] = page;
7595 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7596
a32c055f 7597 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7598
7599 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7600 LEAVE;
7601}
7602
62275040
BK
7603/**
7604 * ipr_inquiry_page_supported - Is the given inquiry page supported
7605 * @page0: inquiry page 0 buffer
7606 * @page: page code.
7607 *
7608 * This function determines if the specified inquiry page is supported.
7609 *
7610 * Return value:
7611 * 1 if page is supported / 0 if not
7612 **/
7613static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7614{
7615 int i;
7616
7617 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7618 if (page0->page[i] == page)
7619 return 1;
7620
7621 return 0;
7622}
7623
ac09c349
BK
7624/**
7625 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7626 * @ipr_cmd: ipr command struct
7627 *
7628 * This function sends a Page 0xD0 inquiry to the adapter
7629 * to retrieve adapter capabilities.
7630 *
7631 * Return value:
7632 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7633 **/
7634static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7635{
7636 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7637 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7638 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7639
7640 ENTER;
7641 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7642 memset(cap, 0, sizeof(*cap));
7643
7644 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7645 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7646 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7647 sizeof(struct ipr_inquiry_cap));
7648 return IPR_RC_JOB_RETURN;
7649 }
7650
7651 LEAVE;
7652 return IPR_RC_JOB_CONTINUE;
7653}
7654
1da177e4
LT
7655/**
7656 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7657 * @ipr_cmd: ipr command struct
7658 *
7659 * This function sends a Page 3 inquiry to the adapter
7660 * to retrieve software VPD information.
7661 *
7662 * Return value:
7663 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7664 **/
7665static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
7666{
7667 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
7668
7669 ENTER;
7670
ac09c349 7671 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
7672
7673 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7674 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7675 sizeof(struct ipr_inquiry_page3));
7676
7677 LEAVE;
7678 return IPR_RC_JOB_RETURN;
7679}
7680
7681/**
7682 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7683 * @ipr_cmd: ipr command struct
7684 *
7685 * This function sends a Page 0 inquiry to the adapter
7686 * to retrieve supported inquiry pages.
7687 *
7688 * Return value:
7689 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7690 **/
7691static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7692{
7693 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7694 char type[5];
7695
7696 ENTER;
7697
7698 /* Grab the type out of the VPD and store it away */
7699 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7700 type[4] = '\0';
7701 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7702
f688f96d
BK
7703 if (ipr_invalid_adapter(ioa_cfg)) {
7704 dev_err(&ioa_cfg->pdev->dev,
7705 "Adapter not supported in this hardware configuration.\n");
7706
7707 if (!ipr_testmode) {
7708 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7709 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7710 list_add_tail(&ipr_cmd->queue,
7711 &ioa_cfg->hrrq->hrrq_free_q);
7712 return IPR_RC_JOB_RETURN;
7713 }
7714 }
7715
62275040 7716 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7717
62275040
BK
7718 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7719 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7720 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
7721
7722 LEAVE;
7723 return IPR_RC_JOB_RETURN;
7724}
7725
7726/**
7727 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7728 * @ipr_cmd: ipr command struct
7729 *
7730 * This function sends a standard inquiry to the adapter.
7731 *
7732 * Return value:
7733 * IPR_RC_JOB_RETURN
7734 **/
7735static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7736{
7737 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7738
7739 ENTER;
62275040 7740 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
7741
7742 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7743 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7744 sizeof(struct ipr_ioa_vpd));
7745
7746 LEAVE;
7747 return IPR_RC_JOB_RETURN;
7748}
7749
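/*
 * The adapter bring-up job steps in this file chain together: each routine
 * sets ipr_cmd->job_step to the next stage before issuing its command.  The
 * normal order, as wired up in this file, is:
 *
 *   ipr_ioafp_identify_hrrq -> ipr_ioafp_std_inquiry ->
 *   ipr_ioafp_page0_inquiry -> ipr_ioafp_page3_inquiry ->
 *   ipr_ioafp_cap_inquiry -> ipr_ioafp_query_ioa_cfg ->
 *   ipr_init_res_table -> (mode sense/select page 24, dual IOA RAID only) ->
 *   mode sense/select page 28 -> ipr_set_supported_devs -> ipr_ioa_reset_done
 */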
7750/**
214777ba 7751 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
7752 * @ipr_cmd: ipr command struct
7753 *
 7754 * This function sends an Identify Host Request Response Queue
7755 * command to establish the HRRQ with the adapter.
7756 *
7757 * Return value:
7758 * IPR_RC_JOB_RETURN
7759 **/
214777ba 7760static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7761{
7762 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7763 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7764 struct ipr_hrr_queue *hrrq;
1da177e4
LT
7765
7766 ENTER;
05a6538a 7767 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
1da177e4
LT
7768 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7769
56d6aa33 7770 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7771 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 7772
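 /*
  * Identify Host RRQ CDB layout, as built below:
  *   byte 0      - IPR_ID_HOST_RR_Q opcode
  *   byte 1      - format (0x1 on SIS-64) and the multi-HRRQ selection enable bit
  *   bytes 2-5   - low 32 bits of the HRRQ DMA address, MSB first
  *   bytes 7-8   - HRRQ length in bytes (sizeof(u32) * hrrq->size), MSB first
  *   byte 9      - HRRQ index, when multi-HRRQ selection is enabled
  *   bytes 10-13 - upper 32 bits of the HRRQ DMA address (SIS-64 only), MSB first
  *   byte 14     - HRRQ index again, when multi-HRRQ selection is enabled
  */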
05a6538a 7773 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7774 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7775
05a6538a 7776 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7777 if (ioa_cfg->sis64)
7778 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7779
05a6538a 7780 if (ioa_cfg->nvectors == 1)
7781 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7782 else
7783 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7784
7785 ioarcb->cmd_pkt.cdb[2] =
7786 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7787 ioarcb->cmd_pkt.cdb[3] =
7788 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7789 ioarcb->cmd_pkt.cdb[4] =
7790 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7791 ioarcb->cmd_pkt.cdb[5] =
7792 ((u64) hrrq->host_rrq_dma) & 0xff;
7793 ioarcb->cmd_pkt.cdb[7] =
7794 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7795 ioarcb->cmd_pkt.cdb[8] =
7796 (sizeof(u32) * hrrq->size) & 0xff;
7797
7798 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7799 ioarcb->cmd_pkt.cdb[9] =
7800 ioa_cfg->identify_hrrq_index;
1da177e4 7801
05a6538a 7802 if (ioa_cfg->sis64) {
7803 ioarcb->cmd_pkt.cdb[10] =
7804 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7805 ioarcb->cmd_pkt.cdb[11] =
7806 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7807 ioarcb->cmd_pkt.cdb[12] =
7808 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7809 ioarcb->cmd_pkt.cdb[13] =
7810 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7811 }
7812
7813 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7814 ioarcb->cmd_pkt.cdb[14] =
7815 ioa_cfg->identify_hrrq_index;
05a6538a 7816
7817 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7818 IPR_INTERNAL_TIMEOUT);
7819
56d6aa33 7820 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7821 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 7822
7823 LEAVE;
7824 return IPR_RC_JOB_RETURN;
05a6538a 7825 }
7826
1da177e4 7827 LEAVE;
05a6538a 7828 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
7829}
7830
7831/**
7832 * ipr_reset_timer_done - Adapter reset timer function
7833 * @ipr_cmd: ipr command struct
7834 *
7835 * Description: This function is used in adapter reset processing
7836 * for timing events. If the reset_cmd pointer in the IOA
 7837 * config struct is not this adapter's, we are doing nested
7838 * resets and fail_all_ops will take care of freeing the
7839 * command block.
7840 *
7841 * Return value:
7842 * none
7843 **/
7844static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7845{
7846 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7847 unsigned long lock_flags = 0;
7848
7849 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7850
7851 if (ioa_cfg->reset_cmd == ipr_cmd) {
7852 list_del(&ipr_cmd->queue);
7853 ipr_cmd->done(ipr_cmd);
7854 }
7855
7856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7857}
7858
7859/**
7860 * ipr_reset_start_timer - Start a timer for adapter reset job
7861 * @ipr_cmd: ipr command struct
7862 * @timeout: timeout value
7863 *
7864 * Description: This function is used in adapter reset processing
7865 * for timing events. If the reset_cmd pointer in the IOA
 7866 * config struct is not this adapter's, we are doing nested
7867 * resets and fail_all_ops will take care of freeing the
7868 * command block.
7869 *
7870 * Return value:
7871 * none
7872 **/
7873static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7874 unsigned long timeout)
7875{
05a6538a 7876
7877 ENTER;
7878 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7879 ipr_cmd->done = ipr_reset_ioa_job;
7880
7881 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7882 ipr_cmd->timer.expires = jiffies + timeout;
7883 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7884 add_timer(&ipr_cmd->timer);
7885}
7886
7887/**
7888 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7889 * @ioa_cfg: ioa cfg struct
7890 *
7891 * Return value:
7892 * nothing
7893 **/
7894static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7895{
05a6538a 7896 struct ipr_hrr_queue *hrrq;
1da177e4 7897
05a6538a 7898 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 7899 spin_lock(&hrrq->_lock);
05a6538a 7900 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7901
7902 /* Initialize Host RRQ pointers */
7903 hrrq->hrrq_start = hrrq->host_rrq;
7904 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7905 hrrq->hrrq_curr = hrrq->hrrq_start;
7906 hrrq->toggle_bit = 1;
56d6aa33 7907 spin_unlock(&hrrq->_lock);
05a6538a 7908 }
56d6aa33 7909 wmb();
05a6538a 7910
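 /* With a single HRRQ all commands use queue 0; with multiple queues, queue 0
  * appears to be reserved for initialization and error handling, so the
  * round-robin index for normal commands starts at 1 (see ipr_get_hrrq_index). */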
56d6aa33 7911 ioa_cfg->identify_hrrq_index = 0;
7912 if (ioa_cfg->hrrq_num == 1)
7913 atomic_set(&ioa_cfg->hrrq_index, 0);
7914 else
7915 atomic_set(&ioa_cfg->hrrq_index, 1);
1da177e4
LT
7916
7917 /* Zero out config table */
3e7ebdfa 7918 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7919}
7920
214777ba
WB
7921/**
7922 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7923 * @ipr_cmd: ipr command struct
7924 *
7925 * Return value:
7926 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7927 **/
7928static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7929{
7930 unsigned long stage, stage_time;
7931 u32 feedback;
7932 volatile u32 int_reg;
7933 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7934 u64 maskval = 0;
7935
7936 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7937 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7938 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7939
7940 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7941
7942 /* sanity check the stage_time value */
438b0331
WB
7943 if (stage_time == 0)
7944 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7945 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
7946 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7947 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7948 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7949
7950 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7951 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7952 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7953 stage_time = ioa_cfg->transop_timeout;
7954 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7955 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
1df79ca4
WB
7956 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7957 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7958 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7959 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7960 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7961 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7962 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7963 return IPR_RC_JOB_CONTINUE;
7964 }
214777ba
WB
7965 }
7966
7967 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7968 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7969 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7970 ipr_cmd->done = ipr_reset_ioa_job;
7971 add_timer(&ipr_cmd->timer);
05a6538a 7972
7973 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
214777ba
WB
7974
7975 return IPR_RC_JOB_RETURN;
7976}
7977
1da177e4
LT
7978/**
7979 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7980 * @ipr_cmd: ipr command struct
7981 *
7982 * This function reinitializes some control blocks and
7983 * enables destructive diagnostics on the adapter.
7984 *
7985 * Return value:
7986 * IPR_RC_JOB_RETURN
7987 **/
7988static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7989{
7990 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7991 volatile u32 int_reg;
7be96900 7992 volatile u64 maskval;
56d6aa33 7993 int i;
1da177e4
LT
7994
7995 ENTER;
214777ba 7996 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7997 ipr_init_ioa_mem(ioa_cfg);
7998
56d6aa33 7999 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8000 spin_lock(&ioa_cfg->hrrq[i]._lock);
8001 ioa_cfg->hrrq[i].allow_interrupts = 1;
8002 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8003 }
8004 wmb();
8701f185
WB
8005 if (ioa_cfg->sis64) {
8006 /* Set the adapter to the correct endian mode. */
8007 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8008 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8009 }
8010
7be96900 8011 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
8012
8013 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8014 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 8015 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
8016 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8017 return IPR_RC_JOB_CONTINUE;
8018 }
8019
8020 /* Enable destructive diagnostics on IOA */
214777ba
WB
8021 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8022
7be96900
WB
8023 if (ioa_cfg->sis64) {
8024 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8025 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8026 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8027 } else
8028 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 8029
1da177e4
LT
8030 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8031
8032 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8033
214777ba
WB
8034 if (ioa_cfg->sis64) {
8035 ipr_cmd->job_step = ipr_reset_next_stage;
8036 return IPR_RC_JOB_CONTINUE;
8037 }
8038
1da177e4 8039 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 8040 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
8041 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8042 ipr_cmd->done = ipr_reset_ioa_job;
8043 add_timer(&ipr_cmd->timer);
05a6538a 8044 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
8045
8046 LEAVE;
8047 return IPR_RC_JOB_RETURN;
8048}
8049
8050/**
8051 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8052 * @ipr_cmd: ipr command struct
8053 *
8054 * This function is invoked when an adapter dump has run out
8055 * of processing time.
8056 *
8057 * Return value:
8058 * IPR_RC_JOB_CONTINUE
8059 **/
8060static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8061{
8062 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8063
8064 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696
BK
8065 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8066 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4
LT
8067 ioa_cfg->sdt_state = ABORT_DUMP;
8068
4c647e90 8069 ioa_cfg->dump_timeout = 1;
1da177e4
LT
8070 ipr_cmd->job_step = ipr_reset_alert;
8071
8072 return IPR_RC_JOB_CONTINUE;
8073}
8074
8075/**
8076 * ipr_unit_check_no_data - Log a unit check/no data error log
8077 * @ioa_cfg: ioa config struct
8078 *
8079 * Logs an error indicating the adapter unit checked, but for some
8080 * reason, we were unable to fetch the unit check buffer.
8081 *
8082 * Return value:
8083 * nothing
8084 **/
8085static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8086{
8087 ioa_cfg->errors_logged++;
8088 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8089}
8090
8091/**
8092 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8093 * @ioa_cfg: ioa config struct
8094 *
8095 * Fetches the unit check buffer from the adapter by clocking the data
8096 * through the mailbox register.
8097 *
8098 * Return value:
8099 * nothing
8100 **/
8101static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8102{
8103 unsigned long mailbox;
8104 struct ipr_hostrcb *hostrcb;
8105 struct ipr_uc_sdt sdt;
8106 int rc, length;
65f56475 8107 u32 ioasc;
1da177e4
LT
8108
8109 mailbox = readl(ioa_cfg->ioa_mailbox);
8110
dcbad00e 8111 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
8112 ipr_unit_check_no_data(ioa_cfg);
8113 return;
8114 }
8115
8116 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8117 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8118 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8119
dcbad00e
WB
8120 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8121 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8122 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
8123 ipr_unit_check_no_data(ioa_cfg);
8124 return;
8125 }
8126
8127 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
8128 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8129 length = be32_to_cpu(sdt.entry[0].end_token);
8130 else
8131 length = (be32_to_cpu(sdt.entry[0].end_token) -
8132 be32_to_cpu(sdt.entry[0].start_token)) &
8133 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
8134
8135 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8136 struct ipr_hostrcb, queue);
8137 list_del(&hostrcb->queue);
8138 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8139
8140 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8141 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
8142 (__be32 *)&hostrcb->hcam,
8143 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8144
65f56475 8145 if (!rc) {
1da177e4 8146 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8147 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
8148 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8149 ioa_cfg->sdt_state == GET_DUMP)
8150 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8151 } else
1da177e4
LT
8152 ipr_unit_check_no_data(ioa_cfg);
8153
8154 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8155}
8156
110def85
WB
8157/**
8158 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8159 * @ipr_cmd: ipr command struct
8160 *
8161 * Description: This function will call to get the unit check buffer.
8162 *
8163 * Return value:
8164 * IPR_RC_JOB_RETURN
8165 **/
8166static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8167{
8168 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8169
8170 ENTER;
8171 ioa_cfg->ioa_unit_checked = 0;
8172 ipr_get_unit_check_buffer(ioa_cfg);
8173 ipr_cmd->job_step = ipr_reset_alert;
8174 ipr_reset_start_timer(ipr_cmd, 0);
8175
8176 LEAVE;
8177 return IPR_RC_JOB_RETURN;
8178}
8179
1da177e4
LT
8180/**
8181 * ipr_reset_restore_cfg_space - Restore PCI config space.
8182 * @ipr_cmd: ipr command struct
8183 *
8184 * Description: This function restores the saved PCI config space of
8185 * the adapter, fails all outstanding ops back to the callers, and
8186 * fetches the dump/unit check if applicable to this reset.
8187 *
8188 * Return value:
8189 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8190 **/
8191static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8192{
8193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 8194 u32 int_reg;
1da177e4
LT
8195
8196 ENTER;
99c965dd 8197 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8198 pci_restore_state(ioa_cfg->pdev);
1da177e4
LT
8199
8200 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8201 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
8202 return IPR_RC_JOB_CONTINUE;
8203 }
8204
8205 ipr_fail_all_ops(ioa_cfg);
8206
8701f185
WB
8207 if (ioa_cfg->sis64) {
8208 /* Set the adapter to the correct endian mode. */
8209 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8210 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8211 }
8212
1da177e4 8213 if (ioa_cfg->ioa_unit_checked) {
110def85
WB
8214 if (ioa_cfg->sis64) {
8215 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8216 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8217 return IPR_RC_JOB_RETURN;
8218 } else {
8219 ioa_cfg->ioa_unit_checked = 0;
8220 ipr_get_unit_check_buffer(ioa_cfg);
8221 ipr_cmd->job_step = ipr_reset_alert;
8222 ipr_reset_start_timer(ipr_cmd, 0);
8223 return IPR_RC_JOB_RETURN;
8224 }
1da177e4
LT
8225 }
8226
8227 if (ioa_cfg->in_ioa_bringdown) {
8228 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8229 } else {
8230 ipr_cmd->job_step = ipr_reset_enable_ioa;
8231
8232 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 8233 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 8234 ioa_cfg->dump_timeout = 0;
4d4dd706
KSS
8235 if (ioa_cfg->sis64)
8236 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8237 else
8238 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
1da177e4
LT
8239 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8240 schedule_work(&ioa_cfg->work_q);
8241 return IPR_RC_JOB_RETURN;
8242 }
8243 }
8244
438b0331 8245 LEAVE;
1da177e4
LT
8246 return IPR_RC_JOB_CONTINUE;
8247}
8248
e619e1a7
BK
8249/**
8250 * ipr_reset_bist_done - BIST has completed on the adapter.
8251 * @ipr_cmd: ipr command struct
8252 *
8253 * Description: Unblock config space and resume the reset process.
8254 *
8255 * Return value:
8256 * IPR_RC_JOB_CONTINUE
8257 **/
8258static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8259{
fb51ccbf
JK
8260 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8261
e619e1a7 8262 ENTER;
fb51ccbf
JK
8263 if (ioa_cfg->cfg_locked)
8264 pci_cfg_access_unlock(ioa_cfg->pdev);
8265 ioa_cfg->cfg_locked = 0;
e619e1a7
BK
8266 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8267 LEAVE;
8268 return IPR_RC_JOB_CONTINUE;
8269}
8270
1da177e4
LT
8271/**
8272 * ipr_reset_start_bist - Run BIST on the adapter.
8273 * @ipr_cmd: ipr command struct
8274 *
8275 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8276 *
8277 * Return value:
8278 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8279 **/
8280static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8281{
8282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8283 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
8284
8285 ENTER;
cb237ef7
WB
8286 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8287 writel(IPR_UPROCI_SIS64_START_BIST,
8288 ioa_cfg->regs.set_uproc_interrupt_reg32);
8289 else
8290 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8291
8292 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8293 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
8294 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8295 rc = IPR_RC_JOB_RETURN;
cb237ef7 8296 } else {
fb51ccbf
JK
8297 if (ioa_cfg->cfg_locked)
8298 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8299 ioa_cfg->cfg_locked = 0;
cb237ef7
WB
8300 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8301 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
8302 }
8303
8304 LEAVE;
8305 return rc;
8306}
8307
463fc696
BK
8308/**
8309 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8310 * @ipr_cmd: ipr command struct
8311 *
8312 * Description: This clears PCI reset to the adapter and delays two seconds.
8313 *
8314 * Return value:
8315 * IPR_RC_JOB_RETURN
8316 **/
8317static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8318{
8319 ENTER;
8320 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8321 ipr_cmd->job_step = ipr_reset_bist_done;
8322 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8323 LEAVE;
8324 return IPR_RC_JOB_RETURN;
8325}
8326
8327/**
8328 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8329 * @ipr_cmd: ipr command struct
8330 *
8331 * Description: This asserts PCI reset to the adapter.
8332 *
8333 * Return value:
8334 * IPR_RC_JOB_RETURN
8335 **/
8336static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8337{
8338 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8339 struct pci_dev *pdev = ioa_cfg->pdev;
8340
8341 ENTER;
463fc696
BK
8342 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8343 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8344 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8345 LEAVE;
8346 return IPR_RC_JOB_RETURN;
8347}
8348
fb51ccbf
JK
8349/**
8350 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8351 * @ipr_cmd: ipr command struct
8352 *
8353 * Description: This attempts to block config access to the IOA.
8354 *
8355 * Return value:
8356 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8357 **/
8358static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8359{
8360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8361 int rc = IPR_RC_JOB_CONTINUE;
8362
8363 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8364 ioa_cfg->cfg_locked = 1;
8365 ipr_cmd->job_step = ioa_cfg->reset;
8366 } else {
8367 if (ipr_cmd->u.time_left) {
8368 rc = IPR_RC_JOB_RETURN;
8369 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8370 ipr_reset_start_timer(ipr_cmd,
8371 IPR_CHECK_FOR_RESET_TIMEOUT);
8372 } else {
8373 ipr_cmd->job_step = ioa_cfg->reset;
8374 dev_err(&ioa_cfg->pdev->dev,
8375 "Timed out waiting to lock config access. Resetting anyway.\n");
8376 }
8377 }
8378
8379 return rc;
8380}
8381
8382/**
8383 * ipr_reset_block_config_access - Block config access to the IOA
8384 * @ipr_cmd: ipr command struct
8385 *
8386 * Description: This attempts to block config access to the IOA
8387 *
8388 * Return value:
8389 * IPR_RC_JOB_CONTINUE
8390 **/
8391static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8392{
8393 ipr_cmd->ioa_cfg->cfg_locked = 0;
8394 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8395 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8396 return IPR_RC_JOB_CONTINUE;
8397}
8398
1da177e4
LT
8399/**
8400 * ipr_reset_allowed - Query whether or not IOA can be reset
8401 * @ioa_cfg: ioa config struct
8402 *
8403 * Return value:
8404 * 0 if reset not allowed / non-zero if reset is allowed
8405 **/
8406static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8407{
8408 volatile u32 temp_reg;
8409
8410 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8411 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8412}
8413
8414/**
8415 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8416 * @ipr_cmd: ipr command struct
8417 *
8418 * Description: This function waits for adapter permission to run BIST,
8419 * then runs BIST. If the adapter does not give permission after a
8420 * reasonable time, we will reset the adapter anyway. The impact of
8421 * resetting the adapter without warning the adapter is the risk of
8422 * losing the persistent error log on the adapter. If the adapter is
8423 * reset while it is writing to the flash on the adapter, the flash
8424 * segment will have bad ECC and be zeroed.
8425 *
8426 * Return value:
8427 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8428 **/
8429static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8430{
8431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8432 int rc = IPR_RC_JOB_RETURN;
8433
8434 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8435 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8436 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8437 } else {
fb51ccbf 8438 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8439 rc = IPR_RC_JOB_CONTINUE;
8440 }
8441
8442 return rc;
8443}
8444
8445/**
8701f185 8446 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8447 * @ipr_cmd: ipr command struct
8448 *
8449 * Description: This function alerts the adapter that it will be reset.
8450 * If memory space is not currently enabled, proceed directly
8451 * to running BIST on the adapter. The timer must always be started
8452 * so we guarantee we do not run BIST from ipr_isr.
8453 *
8454 * Return value:
8455 * IPR_RC_JOB_RETURN
8456 **/
8457static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8458{
8459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8460 u16 cmd_reg;
8461 int rc;
8462
8463 ENTER;
8464 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8465
8466 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8467 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8468 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8469 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8470 } else {
fb51ccbf 8471 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8472 }
8473
8474 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8475 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8476
8477 LEAVE;
8478 return IPR_RC_JOB_RETURN;
8479}
8480
8481/**
8482 * ipr_reset_ucode_download_done - Microcode download completion
8483 * @ipr_cmd: ipr command struct
8484 *
8485 * Description: This function unmaps the microcode download buffer.
8486 *
8487 * Return value:
8488 * IPR_RC_JOB_CONTINUE
8489 **/
8490static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8491{
8492 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8493 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8494
d73341bf 8495 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
1da177e4
LT
8496 sglist->num_sg, DMA_TO_DEVICE);
8497
8498 ipr_cmd->job_step = ipr_reset_alert;
8499 return IPR_RC_JOB_CONTINUE;
8500}
8501
8502/**
8503 * ipr_reset_ucode_download - Download microcode to the adapter
8504 * @ipr_cmd: ipr command struct
8505 *
8506 * Description: This function checks to see if there is microcode
8507 * to download to the adapter. If there is, a download is performed.
8508 *
8509 * Return value:
8510 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8511 **/
8512static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8513{
8514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8515 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8516
8517 ENTER;
8518 ipr_cmd->job_step = ipr_reset_alert;
8519
8520 if (!sglist)
8521 return IPR_RC_JOB_CONTINUE;
8522
8523 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8524 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8525 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8526 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8527 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8528 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8529 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8530
a32c055f
WB
8531 if (ioa_cfg->sis64)
8532 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8533 else
8534 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
8535 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8536
8537 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8538 IPR_WRITE_BUFFER_TIMEOUT);
8539
8540 LEAVE;
8541 return IPR_RC_JOB_RETURN;
8542}
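/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the WRITE BUFFER CDB built above carries the microcode image length as
 * a 24-bit big-endian value in bytes 6..8. For a hypothetical
 * sglist->buffer_len of 0x012345 bytes:
 *
 *	cdb[6] = (0x012345 & 0xff0000) >> 16;	// 0x01
 *	cdb[7] = (0x012345 & 0x00ff00) >> 8;	// 0x23
 *	cdb[8] =  0x012345 & 0x0000ff;		// 0x45
 *
 * The image data itself is supplied through the IOADL built by
 * ipr_build_ucode_ioadl64() or ipr_build_ucode_ioadl(), depending on
 * ioa_cfg->sis64, rather than through the CDB.
 */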
8543
8544/**
8545 * ipr_reset_shutdown_ioa - Shutdown the adapter
8546 * @ipr_cmd: ipr command struct
8547 *
8548 * Description: This function issues an adapter shutdown of the
8549 * specified type to the specified adapter as part of the
8550 * adapter reset job.
8551 *
8552 * Return value:
8553 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8554 **/
8555static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8556{
8557 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8558 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8559 unsigned long timeout;
8560 int rc = IPR_RC_JOB_CONTINUE;
8561
8562 ENTER;
56d6aa33 8563 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8564 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8565 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8566 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8567 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8568 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8569
ac09c349
BK
8570 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8571 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
8572 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8573 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
8574 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8575 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8576 else
ac09c349 8577 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
8578
8579 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8580
8581 rc = IPR_RC_JOB_RETURN;
8582 ipr_cmd->job_step = ipr_reset_ucode_download;
8583 } else
8584 ipr_cmd->job_step = ipr_reset_alert;
8585
8586 LEAVE;
8587 return rc;
8588}
8589
8590/**
8591 * ipr_reset_ioa_job - Adapter reset job
8592 * @ipr_cmd: ipr command struct
8593 *
8594 * Description: This function is the job router for the adapter reset job.
8595 *
8596 * Return value:
8597 * none
8598 **/
8599static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8600{
8601 u32 rc, ioasc;
1da177e4
LT
8602 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8603
8604 do {
96d21f00 8605 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
8606
8607 if (ioa_cfg->reset_cmd != ipr_cmd) {
8608 /*
8609 * We are doing nested adapter resets and this is
8610 * not the current reset job.
8611 */
05a6538a 8612 list_add_tail(&ipr_cmd->queue,
8613 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
8614 return;
8615 }
8616
8617 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
8618 rc = ipr_cmd->job_step_failed(ipr_cmd);
8619 if (rc == IPR_RC_JOB_RETURN)
8620 return;
1da177e4
LT
8621 }
8622
8623 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8624 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8625 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8626 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
8627}
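/*
 * Editor's note (illustrative, not part of the original source): each job
 * step returns either IPR_RC_JOB_CONTINUE, meaning the next step can run
 * immediately in this loop, or IPR_RC_JOB_RETURN, meaning the step started
 * an asynchronous operation (timer or adapter command) whose completion
 * will invoke ipr_reset_ioa_job() again. If a nested reset has replaced
 * reset_cmd, the stale command is simply returned to the free queue; a
 * step that completes with a non-zero sense key is routed through
 * job_step_failed, which defaults to ipr_reset_cmd_failed.
 */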
8628
8629/**
8630 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8631 * @ioa_cfg: ioa config struct
8632 * @job_step: first job step of reset job
8633 * @shutdown_type: shutdown type
8634 *
8635 * Description: This function will initiate the reset of the given adapter
8636 * starting at the selected job step.
8637 * If the caller needs to wait on the completion of the reset,
8638 * the caller must sleep on the reset_wait_q.
8639 *
8640 * Return value:
8641 * none
8642 **/
8643static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8644 int (*job_step) (struct ipr_cmnd *),
8645 enum ipr_shutdown_type shutdown_type)
8646{
8647 struct ipr_cmnd *ipr_cmd;
56d6aa33 8648 int i;
1da177e4
LT
8649
8650 ioa_cfg->in_reset_reload = 1;
56d6aa33 8651 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8652 spin_lock(&ioa_cfg->hrrq[i]._lock);
8653 ioa_cfg->hrrq[i].allow_cmds = 0;
8654 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8655 }
8656 wmb();
bfae7820
BK
8657 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8658 scsi_block_requests(ioa_cfg->host);
1da177e4
LT
8659
8660 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8661 ioa_cfg->reset_cmd = ipr_cmd;
8662 ipr_cmd->job_step = job_step;
8663 ipr_cmd->u.shutdown_type = shutdown_type;
8664
8665 ipr_reset_ioa_job(ipr_cmd);
8666}
8667
8668/**
8669 * ipr_initiate_ioa_reset - Initiate an adapter reset
8670 * @ioa_cfg: ioa config struct
8671 * @shutdown_type: shutdown type
8672 *
8673 * Description: This function will initiate the reset of the given adapter.
8674 * If the caller needs to wait on the completion of the reset,
8675 * the caller must sleep on the reset_wait_q.
8676 *
8677 * Return value:
8678 * none
8679 **/
8680static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8681 enum ipr_shutdown_type shutdown_type)
8682{
56d6aa33 8683 int i;
8684
8685 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
8686 return;
8687
41e9a696
BK
8688 if (ioa_cfg->in_reset_reload) {
8689 if (ioa_cfg->sdt_state == GET_DUMP)
8690 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8691 else if (ioa_cfg->sdt_state == READ_DUMP)
8692 ioa_cfg->sdt_state = ABORT_DUMP;
8693 }
1da177e4
LT
8694
8695 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8696 dev_err(&ioa_cfg->pdev->dev,
8697 "IOA taken offline - error recovery failed\n");
8698
8699 ioa_cfg->reset_retries = 0;
56d6aa33 8700 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8701 spin_lock(&ioa_cfg->hrrq[i]._lock);
8702 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8703 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8704 }
8705 wmb();
1da177e4
LT
8706
8707 if (ioa_cfg->in_ioa_bringdown) {
8708 ioa_cfg->reset_cmd = NULL;
8709 ioa_cfg->in_reset_reload = 0;
8710 ipr_fail_all_ops(ioa_cfg);
8711 wake_up_all(&ioa_cfg->reset_wait_q);
8712
bfae7820
BK
8713 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8714 spin_unlock_irq(ioa_cfg->host->host_lock);
8715 scsi_unblock_requests(ioa_cfg->host);
8716 spin_lock_irq(ioa_cfg->host->host_lock);
8717 }
1da177e4
LT
8718 return;
8719 } else {
8720 ioa_cfg->in_ioa_bringdown = 1;
8721 shutdown_type = IPR_SHUTDOWN_NONE;
8722 }
8723 }
8724
8725 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8726 shutdown_type);
8727}
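/*
 * Editor's note (illustrative, not part of the original source):
 * ipr_initiate_ioa_reset() escalates on repeated failure. Each attempt
 * bumps reset_retries; once IPR_NUM_RESET_RELOAD_RETRIES is reached the
 * HRRQs are marked ioa_is_dead and, if the adapter was already being
 * brought down, outstanding ops are failed back via ipr_fail_all_ops();
 * otherwise one final bringdown pass is made with IPR_SHUTDOWN_NONE.
 */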
8728
f8a88b19
LV
8729/**
8730 * ipr_reset_freeze - Hold off all I/O activity
8731 * @ipr_cmd: ipr command struct
8732 *
8733 * Description: If the PCI slot is frozen, hold off all I/O
8734 * activity; then, as soon as the slot is available again,
8735 * initiate an adapter reset.
8736 */
8737static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8738{
56d6aa33 8739 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8740 int i;
8741
f8a88b19 8742 /* Disallow new interrupts, avoid loop */
56d6aa33 8743 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8744 spin_lock(&ioa_cfg->hrrq[i]._lock);
8745 ioa_cfg->hrrq[i].allow_interrupts = 0;
8746 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8747 }
8748 wmb();
05a6538a 8749 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
f8a88b19
LV
8750 ipr_cmd->done = ipr_reset_ioa_job;
8751 return IPR_RC_JOB_RETURN;
8752}
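/*
 * Editor's note (illustrative, not part of the original source): while the
 * slot is frozen the hardware cannot be touched, so this step only masks
 * further interrupt handling, parks the command on the HRRQ pending queue
 * with ->done set to ipr_reset_ioa_job, and returns; recovery continues
 * once the PCI core calls ipr_pci_slot_reset(), which starts the
 * restore/reset job.
 */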
8753
6270e593
BK
8754/**
8755 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8756 * @pdev: PCI device struct
8757 *
8758 * Description: This routine is called to tell us that the MMIO
8759 * access to the IOA has been restored
8760 */
8761static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8762{
8763 unsigned long flags = 0;
8764 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8765
8766 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8767 if (!ioa_cfg->probe_done)
8768 pci_save_state(pdev);
8769 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8770 return PCI_ERS_RESULT_NEED_RESET;
8771}
8772
f8a88b19
LV
8773/**
8774 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8775 * @pdev: PCI device struct
8776 *
8777 * Description: This routine is called to tell us that the PCI bus
8778 * is down. Can't do anything here, except put the device driver
8779 * into a holding pattern, waiting for the PCI bus to come back.
8780 */
8781static void ipr_pci_frozen(struct pci_dev *pdev)
8782{
8783 unsigned long flags = 0;
8784 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8785
8786 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8787 if (ioa_cfg->probe_done)
8788 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
f8a88b19
LV
8789 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8790}
8791
8792/**
8793 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8794 * @pdev: PCI device struct
8795 *
8796 * Description: This routine is called by the pci error recovery
8797 * code after the PCI slot has been reset, just before we
8798 * should resume normal operations.
8799 */
8800static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8801{
8802 unsigned long flags = 0;
8803 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8804
8805 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8806 if (ioa_cfg->probe_done) {
8807 if (ioa_cfg->needs_warm_reset)
8808 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8809 else
8810 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8811 IPR_SHUTDOWN_NONE);
8812 } else
8813 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
8814 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8815 return PCI_ERS_RESULT_RECOVERED;
8816}
8817
8818/**
8819 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8820 * @pdev: PCI device struct
8821 *
8822 * Description: This routine is called when the PCI bus has
8823 * permanently failed.
8824 */
8825static void ipr_pci_perm_failure(struct pci_dev *pdev)
8826{
8827 unsigned long flags = 0;
8828 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 8829 int i;
f8a88b19
LV
8830
8831 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8832 if (ioa_cfg->probe_done) {
8833 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8834 ioa_cfg->sdt_state = ABORT_DUMP;
8835 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8836 ioa_cfg->in_ioa_bringdown = 1;
8837 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8838 spin_lock(&ioa_cfg->hrrq[i]._lock);
8839 ioa_cfg->hrrq[i].allow_cmds = 0;
8840 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8841 }
8842 wmb();
8843 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8844 } else
8845 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
8846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8847}
8848
8849/**
8850 * ipr_pci_error_detected - Called when a PCI error is detected.
8851 * @pdev: PCI device struct
8852 * @state: PCI channel state
8853 *
8854 * Description: Called when a PCI error is detected.
8855 *
8856 * Return value:
8857 * PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET / PCI_ERS_RESULT_DISCONNECT
8858 */
8859static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8860 pci_channel_state_t state)
8861{
8862 switch (state) {
8863 case pci_channel_io_frozen:
8864 ipr_pci_frozen(pdev);
6270e593 8865 return PCI_ERS_RESULT_CAN_RECOVER;
f8a88b19
LV
8866 case pci_channel_io_perm_failure:
8867 ipr_pci_perm_failure(pdev);
8868 return PCI_ERS_RESULT_DISCONNECT;
8869 break;
8870 default:
8871 break;
8872 }
8873 return PCI_ERS_RESULT_NEED_RESET;
8874}
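/*
 * Editor's note (illustrative, not part of the original source): the EEH
 * flow implemented by the callbacks above is, in order:
 * error_detected(frozen) -> ipr_pci_frozen() queues ipr_reset_freeze and
 * returns PCI_ERS_RESULT_CAN_RECOVER; mmio_enabled() requests a reset via
 * PCI_ERS_RESULT_NEED_RESET; slot_reset() restarts the normal adapter
 * reset job (warm reset or config-space restore) and reports
 * PCI_ERS_RESULT_RECOVERED. A permanent failure marks the adapter dead.
 */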
8875
1da177e4
LT
8876/**
8877 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8878 * @ioa_cfg: ioa cfg struct
8879 *
8880 * Description: This is the second phase of adapter initialization.
8881 * This function takes care of initializing the adapter to the point
8882 * where it can accept new commands.
8883 *
8884 * Return value:
b1c11812 8885 * 0 on success / -EIO on failure
1da177e4 8886 **/
6f039790 8887static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
8888{
8889 int rc = 0;
8890 unsigned long host_lock_flags = 0;
8891
8892 ENTER;
8893 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8894 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6270e593 8895 ioa_cfg->probe_done = 1;
ce155cce
BK
8896 if (ioa_cfg->needs_hard_reset) {
8897 ioa_cfg->needs_hard_reset = 0;
8898 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8899 } else
8900 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8901 IPR_SHUTDOWN_NONE);
1da177e4 8902 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
1da177e4
LT
8903
8904 LEAVE;
8905 return rc;
8906}
8907
8908/**
8909 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8910 * @ioa_cfg: ioa config struct
8911 *
8912 * Return value:
8913 * none
8914 **/
8915static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8916{
8917 int i;
8918
8919 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8920 if (ioa_cfg->ipr_cmnd_list[i])
d73341bf 8921 dma_pool_free(ioa_cfg->ipr_cmd_pool,
1da177e4
LT
8922 ioa_cfg->ipr_cmnd_list[i],
8923 ioa_cfg->ipr_cmnd_list_dma[i]);
8924
8925 ioa_cfg->ipr_cmnd_list[i] = NULL;
8926 }
8927
8928 if (ioa_cfg->ipr_cmd_pool)
d73341bf 8929 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 8930
89aad428
BK
8931 kfree(ioa_cfg->ipr_cmnd_list);
8932 kfree(ioa_cfg->ipr_cmnd_list_dma);
8933 ioa_cfg->ipr_cmnd_list = NULL;
8934 ioa_cfg->ipr_cmnd_list_dma = NULL;
1da177e4
LT
8935 ioa_cfg->ipr_cmd_pool = NULL;
8936}
8937
8938/**
8939 * ipr_free_mem - Frees memory allocated for an adapter
8940 * @ioa_cfg: ioa cfg struct
8941 *
8942 * Return value:
8943 * nothing
8944 **/
8945static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8946{
8947 int i;
8948
8949 kfree(ioa_cfg->res_entries);
d73341bf
AB
8950 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8951 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4 8952 ipr_free_cmd_blks(ioa_cfg);
05a6538a 8953
8954 for (i = 0; i < ioa_cfg->hrrq_num; i++)
d73341bf
AB
8955 dma_free_coherent(&ioa_cfg->pdev->dev,
8956 sizeof(u32) * ioa_cfg->hrrq[i].size,
8957 ioa_cfg->hrrq[i].host_rrq,
8958 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 8959
d73341bf
AB
8960 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8961 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4
LT
8962
8963 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
8964 dma_free_coherent(&ioa_cfg->pdev->dev,
8965 sizeof(struct ipr_hostrcb),
8966 ioa_cfg->hostrcb[i],
8967 ioa_cfg->hostrcb_dma[i]);
1da177e4
LT
8968 }
8969
8970 ipr_free_dump(ioa_cfg);
1da177e4
LT
8971 kfree(ioa_cfg->trace);
8972}
8973
8974/**
8975 * ipr_free_all_resources - Free all allocated resources for an adapter.
8976 * @ipr_cmd: ipr command struct
8977 *
8978 * This function frees all allocated resources for the
8979 * specified adapter.
8980 *
8981 * Return value:
8982 * none
8983 **/
8984static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8985{
8986 struct pci_dev *pdev = ioa_cfg->pdev;
8987
8988 ENTER;
05a6538a 8989 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8990 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8991 int i;
8992 for (i = 0; i < ioa_cfg->nvectors; i++)
8993 free_irq(ioa_cfg->vectors_info[i].vec,
8994 &ioa_cfg->hrrq[i]);
8995 } else
8996 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8997
56d6aa33 8998 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
05a6538a 8999 pci_disable_msi(pdev);
56d6aa33 9000 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9001 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
05a6538a 9002 pci_disable_msix(pdev);
56d6aa33 9003 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9004 }
05a6538a 9005
1da177e4
LT
9006 iounmap(ioa_cfg->hdw_dma_regs);
9007 pci_release_regions(pdev);
9008 ipr_free_mem(ioa_cfg);
9009 scsi_host_put(ioa_cfg->host);
9010 pci_disable_device(pdev);
9011 LEAVE;
9012}
9013
9014/**
9015 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9016 * @ioa_cfg: ioa config struct
9017 *
9018 * Return value:
9019 * 0 on success / -ENOMEM on allocation failure
9020 **/
6f039790 9021static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9022{
9023 struct ipr_cmnd *ipr_cmd;
9024 struct ipr_ioarcb *ioarcb;
9025 dma_addr_t dma_addr;
05a6538a 9026 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 9027
d73341bf 9028 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
203fa3fe 9029 sizeof(struct ipr_cmnd), 512, 0);
1da177e4
LT
9030
9031 if (!ioa_cfg->ipr_cmd_pool)
9032 return -ENOMEM;
9033
89aad428
BK
9034 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9035 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9036
9037 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9038 ipr_free_cmd_blks(ioa_cfg);
9039 return -ENOMEM;
9040 }
9041
05a6538a 9042 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9043 if (ioa_cfg->hrrq_num > 1) {
9044 if (i == 0) {
9045 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9046 ioa_cfg->hrrq[i].min_cmd_id = 0;
9047 ioa_cfg->hrrq[i].max_cmd_id =
9048 (entries_each_hrrq - 1);
9049 } else {
9050 entries_each_hrrq =
9051 IPR_NUM_BASE_CMD_BLKS/
9052 (ioa_cfg->hrrq_num - 1);
9053 ioa_cfg->hrrq[i].min_cmd_id =
9054 IPR_NUM_INTERNAL_CMD_BLKS +
9055 (i - 1) * entries_each_hrrq;
9056 ioa_cfg->hrrq[i].max_cmd_id =
9057 (IPR_NUM_INTERNAL_CMD_BLKS +
9058 i * entries_each_hrrq - 1);
9059 }
9060 } else {
9061 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9062 ioa_cfg->hrrq[i].min_cmd_id = 0;
9063 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9064 }
9065 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9066 }
9067
9068 BUG_ON(ioa_cfg->hrrq_num == 0);
9069
9070 i = IPR_NUM_CMD_BLKS -
9071 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9072 if (i > 0) {
9073 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9074 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9075 }
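	/*
	 * Editor's note (illustrative sketch with hypothetical values, not
	 * part of the original source): with hrrq_num == 3,
	 * IPR_NUM_INTERNAL_CMD_BLKS == 4, IPR_NUM_BASE_CMD_BLKS == 96 and
	 * IPR_NUM_CMD_BLKS == 100, the partitioning above yields:
	 *	hrrq[0]: cmd ids 0..3   (internal commands)
	 *	hrrq[1]: cmd ids 4..51
	 *	hrrq[2]: cmd ids 52..99
	 * Any remainder from the integer division is folded into the last
	 * HRRQ by the adjustment just above.
	 */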
9076
1da177e4 9077 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
d73341bf 9078 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
9079
9080 if (!ipr_cmd) {
9081 ipr_free_cmd_blks(ioa_cfg);
9082 return -ENOMEM;
9083 }
9084
9085 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9086 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9087 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9088
9089 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
9090 ipr_cmd->dma_addr = dma_addr;
9091 if (ioa_cfg->sis64)
9092 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9093 else
9094 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9095
1da177e4 9096 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
9097 if (ioa_cfg->sis64) {
9098 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9099 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9100 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9101 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
9102 } else {
9103 ioarcb->write_ioadl_addr =
9104 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9105 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9106 ioarcb->ioasa_host_pci_addr =
96d21f00 9107 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9108 }
1da177e4
LT
9109 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9110 ipr_cmd->cmd_index = i;
9111 ipr_cmd->ioa_cfg = ioa_cfg;
9112 ipr_cmd->sense_buffer_dma = dma_addr +
9113 offsetof(struct ipr_cmnd, sense_buffer);
9114
05a6538a 9115 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9116 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9117 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9118 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9119 hrrq_id++;
1da177e4
LT
9120 }
9121
9122 return 0;
9123}
9124
9125/**
9126 * ipr_alloc_mem - Allocate memory for an adapter
9127 * @ioa_cfg: ioa config struct
9128 *
9129 * Return value:
9130 * 0 on success / non-zero for error
9131 **/
6f039790 9132static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9133{
9134 struct pci_dev *pdev = ioa_cfg->pdev;
9135 int i, rc = -ENOMEM;
9136
9137 ENTER;
0bc42e35 9138 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 9139 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
9140
9141 if (!ioa_cfg->res_entries)
9142 goto out;
9143
3e7ebdfa 9144 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9145 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9146 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9147 }
1da177e4 9148
d73341bf
AB
9149 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9150 sizeof(struct ipr_misc_cbs),
9151 &ioa_cfg->vpd_cbs_dma,
9152 GFP_KERNEL);
1da177e4
LT
9153
9154 if (!ioa_cfg->vpd_cbs)
9155 goto out_free_res_entries;
9156
9157 if (ipr_alloc_cmd_blks(ioa_cfg))
9158 goto out_free_vpd_cbs;
9159
05a6538a 9160 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9161 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9162 sizeof(u32) * ioa_cfg->hrrq[i].size,
d73341bf
AB
9163 &ioa_cfg->hrrq[i].host_rrq_dma,
9164 GFP_KERNEL);
05a6538a 9165
9166 if (!ioa_cfg->hrrq[i].host_rrq) {
9167 while (--i >= 0)
d73341bf 9168 dma_free_coherent(&pdev->dev,
05a6538a 9169 sizeof(u32) * ioa_cfg->hrrq[i].size,
9170 ioa_cfg->hrrq[i].host_rrq,
9171 ioa_cfg->hrrq[i].host_rrq_dma);
9172 goto out_ipr_free_cmd_blocks;
9173 }
9174 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9175 }
1da177e4 9176
d73341bf
AB
9177 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9178 ioa_cfg->cfg_table_size,
9179 &ioa_cfg->cfg_table_dma,
9180 GFP_KERNEL);
1da177e4 9181
3e7ebdfa 9182 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9183 goto out_free_host_rrq;
9184
9185 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9186 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9187 sizeof(struct ipr_hostrcb),
9188 &ioa_cfg->hostrcb_dma[i],
9189 GFP_KERNEL);
1da177e4
LT
9190
9191 if (!ioa_cfg->hostrcb[i])
9192 goto out_free_hostrcb_dma;
9193
9194 ioa_cfg->hostrcb[i]->hostrcb_dma =
9195 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9196 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9197 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9198 }
9199
0bc42e35 9200 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
9201 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9202
9203 if (!ioa_cfg->trace)
9204 goto out_free_hostrcb_dma;
9205
1da177e4
LT
9206 rc = 0;
9207out:
9208 LEAVE;
9209 return rc;
9210
9211out_free_hostrcb_dma:
9212 while (i-- > 0) {
d73341bf
AB
9213 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9214 ioa_cfg->hostrcb[i],
9215 ioa_cfg->hostrcb_dma[i]);
1da177e4 9216 }
d73341bf
AB
9217 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9218 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9219out_free_host_rrq:
05a6538a 9220 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf
AB
9221 dma_free_coherent(&pdev->dev,
9222 sizeof(u32) * ioa_cfg->hrrq[i].size,
9223 ioa_cfg->hrrq[i].host_rrq,
9224 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9225 }
1da177e4
LT
9226out_ipr_free_cmd_blocks:
9227 ipr_free_cmd_blks(ioa_cfg);
9228out_free_vpd_cbs:
d73341bf
AB
9229 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9230 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4
LT
9231out_free_res_entries:
9232 kfree(ioa_cfg->res_entries);
9233 goto out;
9234}
9235
9236/**
9237 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9238 * @ioa_cfg: ioa config struct
9239 *
9240 * Return value:
9241 * none
9242 **/
6f039790 9243static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9244{
9245 int i;
9246
9247 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9248 ioa_cfg->bus_attr[i].bus = i;
9249 ioa_cfg->bus_attr[i].qas_enabled = 0;
9250 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9251 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9252 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9253 else
9254 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9255 }
9256}
9257
6270e593
BK
9258/**
9259 * ipr_init_regs - Initialize IOA registers
9260 * @ioa_cfg: ioa config struct
9261 *
9262 * Return value:
9263 * none
9264 **/
9265static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9266{
9267 const struct ipr_interrupt_offsets *p;
9268 struct ipr_interrupts *t;
9269 void __iomem *base;
9270
9271 p = &ioa_cfg->chip_cfg->regs;
9272 t = &ioa_cfg->regs;
9273 base = ioa_cfg->hdw_dma_regs;
9274
9275 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9276 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9277 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9278 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9279 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9280 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9281 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9282 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9283 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9284 t->ioarrin_reg = base + p->ioarrin_reg;
9285 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9286 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9287 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9288 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9289 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9290 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9291
9292 if (ioa_cfg->sis64) {
9293 t->init_feedback_reg = base + p->init_feedback_reg;
9294 t->dump_addr_reg = base + p->dump_addr_reg;
9295 t->dump_data_reg = base + p->dump_data_reg;
9296 t->endian_swap_reg = base + p->endian_swap_reg;
9297 }
9298}
9299
1da177e4
LT
9300/**
9301 * ipr_init_ioa_cfg - Initialize IOA config struct
9302 * @ioa_cfg: ioa config struct
9303 * @host: scsi host struct
9304 * @pdev: PCI dev struct
9305 *
9306 * Return value:
9307 * none
9308 **/
6f039790
GKH
9309static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9310 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9311{
6270e593 9312 int i;
1da177e4
LT
9313
9314 ioa_cfg->host = host;
9315 ioa_cfg->pdev = pdev;
9316 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9317 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9318 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9319 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9320 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9321 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9322 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9323 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9324
1da177e4
LT
9325 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9326 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9327 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9328 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9329 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9330 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9331 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9332 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1da177e4
LT
9333 ioa_cfg->sdt_state = INACTIVE;
9334
9335 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9336 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9337
3e7ebdfa
WB
9338 if (ioa_cfg->sis64) {
9339 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9340 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9341 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9342 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
6270e593
BK
9343 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9344 + ((sizeof(struct ipr_config_table_entry64)
9345 * ioa_cfg->max_devs_supported)));
3e7ebdfa
WB
9346 } else {
9347 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9348 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9349 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9350 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
6270e593
BK
9351 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9352 + ((sizeof(struct ipr_config_table_entry)
9353 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9354 }
6270e593 9355
f688f96d 9356 host->max_channel = IPR_VSET_BUS;
1da177e4
LT
9357 host->unique_id = host->host_no;
9358 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9359 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9360 pci_set_drvdata(pdev, ioa_cfg);
9361
6270e593
BK
9362 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9363 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9364 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9365 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9366 if (i == 0)
9367 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9368 else
9369 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 9370 }
1da177e4
LT
9371}
9372
9373/**
1be7bd82 9374 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9375 * @dev_id: PCI device id struct
9376 *
9377 * Return value:
1be7bd82 9378 * ptr to chip information on success / NULL on failure
1da177e4 9379 **/
6f039790 9380static const struct ipr_chip_t *
1be7bd82 9381ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9382{
9383 int i;
9384
1da177e4
LT
9385 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9386 if (ipr_chip[i].vendor == dev_id->vendor &&
9387 ipr_chip[i].device == dev_id->device)
1be7bd82 9388 return &ipr_chip[i];
1da177e4
LT
9389 return NULL;
9390}
9391
6270e593
BK
9392/**
9393 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9394 * during probe time
9395 * @ioa_cfg: ioa config struct
9396 *
9397 * Return value:
9398 * None
9399 **/
9400static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9401{
9402 struct pci_dev *pdev = ioa_cfg->pdev;
9403
9404 if (pci_channel_offline(pdev)) {
9405 wait_event_timeout(ioa_cfg->eeh_wait_q,
9406 !pci_channel_offline(pdev),
9407 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9408 pci_restore_state(pdev);
9409 }
9410}
9411
05a6538a 9412static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9413{
9414 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
60e76b77 9415 int i, vectors;
05a6538a 9416
9417 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9418 entries[i].entry = i;
9419
60e76b77
AG
9420 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9421 entries, 1, ipr_number_of_msix);
9422 if (vectors < 0) {
6270e593 9423 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9424 return vectors;
05a6538a 9425 }
9426
60e76b77
AG
9427 for (i = 0; i < vectors; i++)
9428 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9429 ioa_cfg->nvectors = vectors;
05a6538a 9430
60e76b77 9431 return 0;
05a6538a 9432}
9433
9434static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9435{
60e76b77 9436 int i, vectors;
05a6538a 9437
60e76b77
AG
9438 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9439 if (vectors < 0) {
6270e593 9440 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9441 return vectors;
05a6538a 9442 }
9443
60e76b77
AG
9444 for (i = 0; i < vectors; i++)
9445 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9446 ioa_cfg->nvectors = vectors;
05a6538a 9447
60e76b77 9448 return 0;
05a6538a 9449}
9450
9451static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9452{
9453 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9454
9455 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9456 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9457 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9458 ioa_cfg->vectors_info[vec_idx].
9459 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9460 }
9461}
9462
9463static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9464{
9465 int i, rc;
9466
9467 for (i = 1; i < ioa_cfg->nvectors; i++) {
9468 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9469 ipr_isr_mhrrq,
9470 0,
9471 ioa_cfg->vectors_info[i].desc,
9472 &ioa_cfg->hrrq[i]);
9473 if (rc) {
9474 while (--i >= 0)
9475 free_irq(ioa_cfg->vectors_info[i].vec,
9476 &ioa_cfg->hrrq[i]);
9477 return rc;
9478 }
9479 }
9480 return 0;
9481}
9482
95fecd90
WB
9483/**
9484 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9485 * @pdev: PCI device struct
9486 *
9487 * Description: Simply sets the msi_received flag to 1 indicating that
9488 * Message Signaled Interrupts are supported.
9489 *
9490 * Return value:
9491 * IRQ_HANDLED
9492 **/
6f039790 9493static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
9494{
9495 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9496 unsigned long lock_flags = 0;
9497 irqreturn_t rc = IRQ_HANDLED;
9498
05a6538a 9499 dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
95fecd90
WB
9500 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9501
9502 ioa_cfg->msi_received = 1;
9503 wake_up(&ioa_cfg->msi_wait_q);
9504
9505 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9506 return rc;
9507}
9508
9509/**
9510 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9511 * @pdev: PCI device struct
9512 *
60e76b77 9513 * Description: The return value from pci_enable_msi_range() cannot always be
95fecd90
WB
9514 * trusted. This routine sets up and initiates a test interrupt to determine
9515 * if the interrupt is received via the ipr_test_intr() service routine.
9516 * If the test fails, the driver will fall back to LSI.
9517 *
9518 * Return value:
9519 * 0 on success / non-zero on failure
9520 **/
6f039790 9521static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
9522{
9523 int rc;
9524 volatile u32 int_reg;
9525 unsigned long lock_flags = 0;
9526
9527 ENTER;
9528
9529 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9530 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9531 ioa_cfg->msi_received = 0;
9532 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 9533 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
9534 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9535 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9536
f19799f4 9537 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9538 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9539 else
9540 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
95fecd90
WB
9541 if (rc) {
9542 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9543 return rc;
9544 } else if (ipr_debug)
9545 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9546
214777ba 9547 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
9548 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9549 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 9550 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
9551 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9552
95fecd90
WB
9553 if (!ioa_cfg->msi_received) {
9554 /* MSI test failed */
9555 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9556 rc = -EOPNOTSUPP;
9557 } else if (ipr_debug)
9558 dev_info(&pdev->dev, "MSI test succeeded.\n");
9559
9560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9561
f19799f4 9562 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9563 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9564 else
9565 free_irq(pdev->irq, ioa_cfg);
95fecd90
WB
9566
9567 LEAVE;
9568
9569 return rc;
9570}
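/*
 * Editor's note (illustrative, not part of the original source): the MSI
 * self-test above unmasks only the IO_DEBUG_ACKNOWLEDGE condition,
 * installs ipr_test_intr() on the candidate vector, and then writes
 * IPR_PCII_IO_DEBUG_ACKNOWLEDGE to sense_interrupt_reg32 so the adapter
 * raises that interrupt. If msi_received is not observed within one
 * second (HZ), MSI/MSI-X delivery is deemed unreliable and the caller
 * falls back to legacy (LSI) interrupts.
 */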
9571
05a6538a 9572 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
1da177e4
LT
9573 * @pdev: PCI device struct
9574 * @dev_id: PCI device id struct
9575 *
9576 * Return value:
9577 * 0 on success / non-zero on failure
9578 **/
6f039790
GKH
9579static int ipr_probe_ioa(struct pci_dev *pdev,
9580 const struct pci_device_id *dev_id)
1da177e4
LT
9581{
9582 struct ipr_ioa_cfg *ioa_cfg;
9583 struct Scsi_Host *host;
9584 unsigned long ipr_regs_pci;
9585 void __iomem *ipr_regs;
a2a65a3e 9586 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 9587 volatile u32 mask, uproc, interrupts;
feccada9 9588 unsigned long lock_flags, driver_lock_flags;
1da177e4
LT
9589
9590 ENTER;
9591
1da177e4 9592 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
1da177e4
LT
9593 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9594
9595 if (!host) {
9596 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9597 rc = -ENOMEM;
6270e593 9598 goto out;
1da177e4
LT
9599 }
9600
9601 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9602 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8d8e7d13 9603 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
1da177e4 9604
1be7bd82 9605 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 9606
1be7bd82 9607 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
9608 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9609 dev_id->vendor, dev_id->device);
9610 goto out_scsi_host_put;
9611 }
9612
a32c055f
WB
9613 /* set SIS 32 or SIS 64 */
9614 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82 9615 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7dd21308 9616 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
89aad428 9617 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
1be7bd82 9618
5469cb5b
BK
9619 if (ipr_transop_timeout)
9620 ioa_cfg->transop_timeout = ipr_transop_timeout;
9621 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9622 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9623 else
9624 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9625
44c10138 9626 ioa_cfg->revid = pdev->revision;
463fc696 9627
6270e593
BK
9628 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9629
1da177e4
LT
9630 ipr_regs_pci = pci_resource_start(pdev, 0);
9631
9632 rc = pci_request_regions(pdev, IPR_NAME);
9633 if (rc < 0) {
9634 dev_err(&pdev->dev,
9635 "Couldn't register memory range of registers\n");
9636 goto out_scsi_host_put;
9637 }
9638
6270e593
BK
9639 rc = pci_enable_device(pdev);
9640
9641 if (rc || pci_channel_offline(pdev)) {
9642 if (pci_channel_offline(pdev)) {
9643 ipr_wait_for_pci_err_recovery(ioa_cfg);
9644 rc = pci_enable_device(pdev);
9645 }
9646
9647 if (rc) {
9648 dev_err(&pdev->dev, "Cannot enable adapter\n");
9649 ipr_wait_for_pci_err_recovery(ioa_cfg);
9650 goto out_release_regions;
9651 }
9652 }
9653
25729a7f 9654 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
9655
9656 if (!ipr_regs) {
9657 dev_err(&pdev->dev,
9658 "Couldn't map memory range of registers\n");
9659 rc = -ENOMEM;
6270e593 9660 goto out_disable;
1da177e4
LT
9661 }
9662
9663 ioa_cfg->hdw_dma_regs = ipr_regs;
9664 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9665 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9666
6270e593 9667 ipr_init_regs(ioa_cfg);
1da177e4 9668
a32c055f 9669 if (ioa_cfg->sis64) {
869404cb 9670 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
a32c055f 9671 if (rc < 0) {
869404cb
AB
9672 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9673 rc = dma_set_mask_and_coherent(&pdev->dev,
9674 DMA_BIT_MASK(32));
a32c055f 9675 }
a32c055f 9676 } else
869404cb 9677 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
a32c055f 9678
1da177e4 9679 if (rc < 0) {
869404cb 9680 dev_err(&pdev->dev, "Failed to set DMA mask\n");
1da177e4
LT
9681 goto cleanup_nomem;
9682 }
9683
9684 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9685 ioa_cfg->chip_cfg->cache_line_size);
9686
9687 if (rc != PCIBIOS_SUCCESSFUL) {
9688 dev_err(&pdev->dev, "Write of cache line size failed\n");
6270e593 9689 ipr_wait_for_pci_err_recovery(ioa_cfg);
1da177e4
LT
9690 rc = -EIO;
9691 goto cleanup_nomem;
9692 }
9693
6270e593
BK
9694 /* Issue MMIO read to ensure card is not in EEH */
9695 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9696 ipr_wait_for_pci_err_recovery(ioa_cfg);
9697
05a6538a 9698 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9699 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9700 IPR_MAX_MSIX_VECTORS);
9701 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9702 }
9703
9704 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9705 ipr_enable_msix(ioa_cfg) == 0)
05a6538a 9706 ioa_cfg->intr_flag = IPR_USE_MSIX;
9707 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9708 ipr_enable_msi(ioa_cfg) == 0)
05a6538a 9709 ioa_cfg->intr_flag = IPR_USE_MSI;
9710 else {
9711 ioa_cfg->intr_flag = IPR_USE_LSI;
9712 ioa_cfg->nvectors = 1;
9713 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9714 }
9715
6270e593
BK
9716 pci_set_master(pdev);
9717
9718 if (pci_channel_offline(pdev)) {
9719 ipr_wait_for_pci_err_recovery(ioa_cfg);
9720 pci_set_master(pdev);
9721 if (pci_channel_offline(pdev)) {
9722 rc = -EIO;
9723 goto out_msi_disable;
9724 }
9725 }
9726
05a6538a 9727 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9728 ioa_cfg->intr_flag == IPR_USE_MSIX) {
95fecd90 9729 rc = ipr_test_msi(ioa_cfg, pdev);
05a6538a 9730 if (rc == -EOPNOTSUPP) {
6270e593 9731 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9732 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9733 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9734 pci_disable_msi(pdev);
9735 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9736 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9737 pci_disable_msix(pdev);
9738 }
9739
9740 ioa_cfg->intr_flag = IPR_USE_LSI;
9741 ioa_cfg->nvectors = 1;
9742 }
95fecd90
WB
9743 else if (rc)
9744 goto out_msi_disable;
05a6538a 9745 else {
9746 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9747 dev_info(&pdev->dev,
9748 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9749 ioa_cfg->nvectors, pdev->irq);
9750 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9751 dev_info(&pdev->dev,
9752 "Request for %d MSIXs succeeded.",
9753 ioa_cfg->nvectors);
9754 }
9755 }
9756
9757 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9758 (unsigned int)num_online_cpus(),
9759 (unsigned int)IPR_MAX_HRRQ_NUM);
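	/*
	 * Editor's note (illustrative, not part of the original source):
	 * the number of host RRQs is capped by the interrupt vectors
	 * actually obtained, the number of online CPUs, and the driver
	 * maximum IPR_MAX_HRRQ_NUM, so a single-vector (LSI or MSI)
	 * fallback always ends up with exactly one HRRQ.
	 */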
95fecd90 9760
1da177e4 9761 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
f170c684 9762 goto out_msi_disable;
1da177e4
LT
9763
9764 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
f170c684 9765 goto out_msi_disable;
1da177e4
LT
9766
9767 rc = ipr_alloc_mem(ioa_cfg);
9768 if (rc < 0) {
9769 dev_err(&pdev->dev,
9770 "Couldn't allocate enough memory for device driver!\n");
f170c684 9771 goto out_msi_disable;
1da177e4
LT
9772 }
9773
6270e593
BK
9774 /* Save away PCI config space for use following IOA reset */
9775 rc = pci_save_state(pdev);
9776
9777 if (rc != PCIBIOS_SUCCESSFUL) {
9778 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9779 rc = -EIO;
9780 goto cleanup_nolog;
9781 }
9782
ce155cce
BK
9783 /*
9784 * If HRRQ updated interrupt is not masked, or reset alert is set,
9785 * the card is in an unknown state and needs a hard reset
9786 */
214777ba
WB
9787 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9788 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9789 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce
BK
9790 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9791 ioa_cfg->needs_hard_reset = 1;
5d7c20b7 9792 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
473b1e8e
BK
9793 ioa_cfg->needs_hard_reset = 1;
9794 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9795 ioa_cfg->ioa_unit_checked = 1;
ce155cce 9796
56d6aa33 9797 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9798 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
56d6aa33 9799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9800
05a6538a 9801 if (ioa_cfg->intr_flag == IPR_USE_MSI
9802 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9803 name_msi_vectors(ioa_cfg);
9804 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9805 0,
9806 ioa_cfg->vectors_info[0].desc,
9807 &ioa_cfg->hrrq[0]);
9808 if (!rc)
9809 rc = ipr_request_other_msi_irqs(ioa_cfg);
9810 } else {
9811 rc = request_irq(pdev->irq, ipr_isr,
9812 IRQF_SHARED,
9813 IPR_NAME, &ioa_cfg->hrrq[0]);
9814 }
1da177e4
LT
9815 if (rc) {
9816 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9817 pdev->irq, rc);
9818 goto cleanup_nolog;
9819 }
9820
463fc696
BK
9821 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9822 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9823 ioa_cfg->needs_warm_reset = 1;
9824 ioa_cfg->reset = ipr_reset_slot_reset;
9825 } else
9826 ioa_cfg->reset = ipr_reset_start_bist;
9827
feccada9 9828 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 9829 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
feccada9 9830 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
9831
9832 LEAVE;
9833out:
9834 return rc;
9835
9836cleanup_nolog:
9837 ipr_free_mem(ioa_cfg);
95fecd90 9838out_msi_disable:
6270e593 9839 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9840 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9841 pci_disable_msi(pdev);
9842 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9843 pci_disable_msix(pdev);
f170c684
JL
9844cleanup_nomem:
9845 iounmap(ipr_regs);
6270e593
BK
9846out_disable:
9847 pci_disable_device(pdev);
1da177e4
LT
9848out_release_regions:
9849 pci_release_regions(pdev);
9850out_scsi_host_put:
9851 scsi_host_put(host);
1da177e4
LT
9852 goto out;
9853}
9854
1da177e4
LT
9855/**
9856 * ipr_initiate_ioa_bringdown - Bring down an adapter
9857 * @ioa_cfg: ioa config struct
9858 * @shutdown_type: shutdown type
9859 *
9860 * Description: This function will initiate bringing down the adapter.
9861 * This consists of issuing an IOA shutdown to the adapter
9862 * to flush the cache, and running BIST.
9863 * If the caller needs to wait on the completion of the reset,
9864 * the caller must sleep on the reset_wait_q.
9865 *
9866 * Return value:
9867 * none
9868 **/
9869static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9870 enum ipr_shutdown_type shutdown_type)
9871{
9872 ENTER;
9873 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9874 ioa_cfg->sdt_state = ABORT_DUMP;
9875 ioa_cfg->reset_retries = 0;
9876 ioa_cfg->in_ioa_bringdown = 1;
9877 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9878 LEAVE;
9879}
9880
9881/**
9882 * __ipr_remove - Remove a single adapter
9883 * @pdev: pci device struct
9884 *
9885 * Adapter hot plug remove entry point.
9886 *
9887 * Return value:
9888 * none
9889 **/
9890static void __ipr_remove(struct pci_dev *pdev)
9891{
9892 unsigned long host_lock_flags = 0;
9893 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
bfae7820 9894 int i;
feccada9 9895 unsigned long driver_lock_flags;
1da177e4
LT
9896 ENTER;
9897
9898 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
203fa3fe 9899 while (ioa_cfg->in_reset_reload) {
970ea294
BK
9900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9901 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9902 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9903 }
9904
bfae7820
BK
9905 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9906 spin_lock(&ioa_cfg->hrrq[i]._lock);
9907 ioa_cfg->hrrq[i].removing_ioa = 1;
9908 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9909 }
9910 wmb();
1da177e4
LT
9911 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9912
9913 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9914 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
43829731 9915 flush_work(&ioa_cfg->work_q);
9077a944 9916 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
1da177e4
LT
9917 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9918
feccada9 9919 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 9920 list_del(&ioa_cfg->queue);
feccada9 9921 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
9922
9923 if (ioa_cfg->sdt_state == ABORT_DUMP)
9924 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9925 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9926
9927 ipr_free_all_resources(ioa_cfg);
9928
9929 LEAVE;
9930}
9931
9932/**
9933 * ipr_remove - IOA hot plug remove entry point
9934 * @pdev: pci device struct
9935 *
9936 * Adapter hot plug remove entry point.
9937 *
9938 * Return value:
9939 * none
9940 **/
6f039790 9941static void ipr_remove(struct pci_dev *pdev)
1da177e4
LT
9942{
9943 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9944
9945 ENTER;
9946
ee959b00 9947 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 9948 &ipr_trace_attr);
ee959b00 9949 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9950 &ipr_dump_attr);
9951 scsi_remove_host(ioa_cfg->host);
9952
9953 __ipr_remove(pdev);
9954
9955 LEAVE;
9956}
9957
9958/**
9959 * ipr_probe - Adapter hot plug add entry point
9960 *
9961 * Return value:
9962 * 0 on success / non-zero on failure
9963 **/
6f039790 9964static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1da177e4
LT
9965{
9966 struct ipr_ioa_cfg *ioa_cfg;
b53d124a 9967 int rc, i;
1da177e4
LT
9968
9969 rc = ipr_probe_ioa(pdev, dev_id);
9970
9971 if (rc)
9972 return rc;
9973
9974 ioa_cfg = pci_get_drvdata(pdev);
9975 rc = ipr_probe_ioa_part2(ioa_cfg);
9976
9977 if (rc) {
9978 __ipr_remove(pdev);
9979 return rc;
9980 }
9981
9982 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9983
9984 if (rc) {
9985 __ipr_remove(pdev);
9986 return rc;
9987 }
9988
ee959b00 9989 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9990 &ipr_trace_attr);
9991
9992 if (rc) {
9993 scsi_remove_host(ioa_cfg->host);
9994 __ipr_remove(pdev);
9995 return rc;
9996 }
9997
ee959b00 9998 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9999 &ipr_dump_attr);
10000
10001 if (rc) {
ee959b00 10002 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10003 &ipr_trace_attr);
10004 scsi_remove_host(ioa_cfg->host);
10005 __ipr_remove(pdev);
10006 return rc;
10007 }
10008
10009 scsi_scan_host(ioa_cfg->host);
b53d124a 10010 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10011
89f8b33c 10012 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10013 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10014 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10015 ioa_cfg->iopoll_weight, ipr_iopoll);
10016 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10017 }
10018 }
10019
1da177e4
LT
10020 schedule_work(&ioa_cfg->work_q);
10021 return 0;
10022}
10023
10024/**
10025 * ipr_shutdown - Shutdown handler.
d18c3db5 10026 * @pdev: pci device struct
1da177e4
LT
10027 *
10028 * This function is invoked upon system shutdown/reboot. It will issue
10029 * an adapter shutdown to the adapter to flush the write cache.
10030 *
10031 * Return value:
10032 * none
10033 **/
d18c3db5 10034static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 10035{
d18c3db5 10036 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4 10037 unsigned long lock_flags = 0;
b53d124a 10038 int i;
1da177e4
LT
10039
10040 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
89f8b33c 10041 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10042 ioa_cfg->iopoll_weight = 0;
10043 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10044 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10045 }
10046
203fa3fe 10047 while (ioa_cfg->in_reset_reload) {
970ea294
BK
10048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10049 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10050 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10051 }
10052
1da177e4
LT
10053 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10055 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10056}
10057
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

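/*
 * PCI error recovery callbacks (e.g. EEH on pSeries): error_detected is
 * called when the PCI core isolates a bus error, mmio_enabled once MMIO
 * access has been restored, and slot_reset after the slot has been reset
 * so the adapter can be reinitialized.
 */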
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

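/*
 * Reboot notifier path: on restart, halt, or power-off, a SHUTDOWN PREPARE
 * FOR NORMAL command is issued to every adapter that is accepting commands
 * so write caches are flushed before the system goes down.
 */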
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

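/*
 * Module entry/exit: register the reboot notifier and the PCI driver;
 * ipr_probe() is then called for each adapter matching ipr_pci_table.
 */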
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);