]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/scsi/ipr.c
scsi: ->queue_rq can't sleep
[mirror_ubuntu-zesty-kernel.git] / drivers / scsi / ipr.c
CommitLineData
1da177e4
LT
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
1da177e4
LT
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
5a0e3ad6 62#include <linux/slab.h>
4d4dd706 63#include <linux/vmalloc.h>
1da177e4
LT
64#include <linux/ioport.h>
65#include <linux/delay.h>
66#include <linux/pci.h>
67#include <linux/wait.h>
68#include <linux/spinlock.h>
69#include <linux/sched.h>
70#include <linux/interrupt.h>
71#include <linux/blkdev.h>
72#include <linux/firmware.h>
73#include <linux/module.h>
74#include <linux/moduleparam.h>
35a39691 75#include <linux/libata.h>
0ce3a7e5 76#include <linux/hdreg.h>
f72919ec 77#include <linux/reboot.h>
3e7ebdfa 78#include <linux/stringify.h>
1da177e4
LT
79#include <asm/io.h>
80#include <asm/irq.h>
81#include <asm/processor.h>
82#include <scsi/scsi.h>
83#include <scsi/scsi_host.h>
84#include <scsi/scsi_tcq.h>
85#include <scsi/scsi_eh.h>
86#include <scsi/scsi_cmnd.h>
1da177e4
LT
87#include "ipr.h"
88
89/*
90 * Global Data
91 */
b7d68ca3 92static LIST_HEAD(ipr_ioa_head);
1da177e4
LT
93static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94static unsigned int ipr_max_speed = 1;
95static int ipr_testmode = 0;
96static unsigned int ipr_fastfail = 0;
5469cb5b 97static unsigned int ipr_transop_timeout = 0;
d3c74871 98static unsigned int ipr_debug = 0;
3e7ebdfa 99static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
ac09c349 100static unsigned int ipr_dual_ioa_raid = 1;
05a6538a 101static unsigned int ipr_number_of_msix = 2;
1da177e4
LT
102static DEFINE_SPINLOCK(ipr_driver_lock);
103
104/* This table describes the differences between DMA controller chips */
105static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
60e7486b 106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
1da177e4 107 .mailbox = 0x0042C,
89aad428 108 .max_cmds = 100,
1da177e4 109 .cache_line_size = 0x20,
7dd21308 110 .clear_isr = 1,
b53d124a 111 .iopoll_weight = 0,
1da177e4
LT
112 {
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
214777ba 115 .clr_interrupt_mask_reg32 = 0x00230,
1da177e4 116 .sense_interrupt_mask_reg = 0x0022C,
214777ba 117 .sense_interrupt_mask_reg32 = 0x0022C,
1da177e4 118 .clr_interrupt_reg = 0x00228,
214777ba 119 .clr_interrupt_reg32 = 0x00228,
1da177e4 120 .sense_interrupt_reg = 0x00224,
214777ba 121 .sense_interrupt_reg32 = 0x00224,
1da177e4
LT
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
214777ba 124 .sense_uproc_interrupt_reg32 = 0x00214,
1da177e4 125 .set_uproc_interrupt_reg = 0x00214,
214777ba
WB
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
1da177e4
LT
129 }
130 },
131 { /* Snipe and Scamp */
132 .mailbox = 0x0052C,
89aad428 133 .max_cmds = 100,
1da177e4 134 .cache_line_size = 0x20,
7dd21308 135 .clear_isr = 1,
b53d124a 136 .iopoll_weight = 0,
1da177e4
LT
137 {
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
214777ba 140 .clr_interrupt_mask_reg32 = 0x0028C,
1da177e4 141 .sense_interrupt_mask_reg = 0x00288,
214777ba 142 .sense_interrupt_mask_reg32 = 0x00288,
1da177e4 143 .clr_interrupt_reg = 0x00284,
214777ba 144 .clr_interrupt_reg32 = 0x00284,
1da177e4 145 .sense_interrupt_reg = 0x00280,
214777ba 146 .sense_interrupt_reg32 = 0x00280,
1da177e4
LT
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
214777ba 149 .sense_uproc_interrupt_reg32 = 0x00290,
1da177e4 150 .set_uproc_interrupt_reg = 0x00290,
214777ba
WB
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
1da177e4
LT
154 }
155 },
a74c1639 156 { /* CRoC */
110def85 157 .mailbox = 0x00044,
89aad428 158 .max_cmds = 1000,
a74c1639 159 .cache_line_size = 0x20,
7dd21308 160 .clear_isr = 0,
b53d124a 161 .iopoll_weight = 64,
a74c1639
WB
162 {
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
214777ba 165 .clr_interrupt_mask_reg32 = 0x0001C,
a74c1639 166 .sense_interrupt_mask_reg = 0x00010,
214777ba 167 .sense_interrupt_mask_reg32 = 0x00014,
a74c1639 168 .clr_interrupt_reg = 0x00008,
214777ba 169 .clr_interrupt_reg32 = 0x0000C,
a74c1639 170 .sense_interrupt_reg = 0x00000,
214777ba 171 .sense_interrupt_reg32 = 0x00004,
a74c1639
WB
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
214777ba 174 .sense_uproc_interrupt_reg32 = 0x00024,
a74c1639 175 .set_uproc_interrupt_reg = 0x00020,
214777ba 176 .set_uproc_interrupt_reg32 = 0x00024,
dcbad00e 177 .clr_uproc_interrupt_reg = 0x00028,
214777ba
WB
178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
dcbad00e 180 .dump_addr_reg = 0x00064,
8701f185
WB
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
a74c1639
WB
183 }
184 },
1da177e4
LT
185};
186
187static const struct ipr_chip_t ipr_chip[] = {
cb237ef7
WB
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
cd9b3d04 196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
1da177e4
LT
197};
198
203fa3fe 199static int ipr_max_bus_speeds[] = {
1da177e4
LT
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201};
202
203MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205module_param_named(max_speed, ipr_max_speed, uint, 0);
206MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207module_param_named(log_level, ipr_log_level, uint, 0);
208MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209module_param_named(testmode, ipr_testmode, int, 0);
210MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
2cf22be0 211module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
1da177e4
LT
212MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
2cf22be0 215module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
d3c74871 216MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
ac09c349
BK
217module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
3e7ebdfa
WB
219module_param_named(max_devs, ipr_max_devs, int, 0);
220MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
05a6538a 222module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
6634ff7c 223MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
1da177e4
LT
224MODULE_LICENSE("GPL");
225MODULE_VERSION(IPR_DRIVER_VERSION);
226
1da177e4
LT
227/* A constant array of IOASCs/URCs/Error Messages */
228static const
229struct ipr_error_table_t ipr_error_table[] = {
933916f3 230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
231 "8155: An unknown error was received"},
232 {0x00330000, 0, 0,
233 "Soft underlength error"},
234 {0x005A0000, 0, 0,
235 "Command to be cancelled not found"},
236 {0x00808000, 0, 0,
237 "Qualified success"},
933916f3 238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 239 "FFFE: Soft device bus error recovered by the IOA"},
933916f3 240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 241 "4101: Soft device bus fabric error"},
5aa3a333
WB
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
933916f3 256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 257 "FFF9: Device sector reassign successful"},
933916f3 258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 259 "FFF7: Media error recovered by device rewrite procedures"},
933916f3 260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 261 "7001: IOA sector reassignment successful"},
933916f3 262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 263 "FFF9: Soft media error. Sector reassignment recommended"},
933916f3 264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 265 "FFF7: Media error recovered by IOA rewrite procedures"},
933916f3 266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 267 "FF3D: Soft PCI bus error recovered by the IOA"},
933916f3 268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 269 "FFF6: Device hardware error recovered by the IOA"},
933916f3 270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 271 "FFF6: Device hardware error recovered by the device"},
933916f3 272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 273 "FF3D: Soft IOA error recovered by the IOA"},
933916f3 274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 275 "FFFA: Undefined device response recovered by the IOA"},
933916f3 276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 277 "FFF6: Device bus error, message or command phase"},
933916f3 278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
35a39691 279 "FFFE: Task Management Function failed"},
933916f3 280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 281 "FFF6: Failure prediction threshold exceeded"},
933916f3 282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 283 "8009: Impending cache battery pack failure"},
ed7bd661 284 {0x02040100, 0, 0,
285 "Logical Unit in process of becoming ready"},
286 {0x02040200, 0, 0,
287 "Initializing command required"},
1da177e4
LT
288 {0x02040400, 0, 0,
289 "34FF: Disk device format in progress"},
ed7bd661 290 {0x02040C00, 0, 0,
291 "Logical unit not accessible, target port in unavailable state"},
65f56475
BK
292 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
293 "9070: IOA requested reset"},
1da177e4
LT
294 {0x023F0000, 0, 0,
295 "Synchronization required"},
ed7bd661 296 {0x02408500, 0, 0,
297 "IOA microcode download required"},
298 {0x02408600, 0, 0,
299 "Device bus connection is prohibited by host"},
1da177e4
LT
300 {0x024E0000, 0, 0,
301 "No ready, IOA shutdown"},
302 {0x025A0000, 0, 0,
303 "Not ready, IOA has been shutdown"},
933916f3 304 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
305 "3020: Storage subsystem configuration error"},
306 {0x03110B00, 0, 0,
307 "FFF5: Medium error, data unreadable, recommend reassign"},
308 {0x03110C00, 0, 0,
309 "7000: Medium error, data unreadable, do not reassign"},
933916f3 310 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 311 "FFF3: Disk media format bad"},
933916f3 312 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 313 "3002: Addressed device failed to respond to selection"},
933916f3 314 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 315 "3100: Device bus error"},
933916f3 316 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
317 "3109: IOA timed out a device command"},
318 {0x04088000, 0, 0,
319 "3120: SCSI bus is not operational"},
933916f3 320 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 321 "4100: Hard device bus fabric error"},
5aa3a333
WB
322 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310C: Logical block guard error detected by the device"},
324 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310C: Logical block reference tag error detected by the device"},
326 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
327 "4170: Scatter list tag / sequence number error"},
328 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
329 "8150: Logical block CRC error on IOA to Host transfer"},
330 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
331 "4170: Logical block sequence number error on IOA to Host transfer"},
332 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
333 "310D: Logical block reference tag error detected by the IOA"},
334 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
335 "310D: Logical block guard error detected by the IOA"},
933916f3 336 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 337 "9000: IOA reserved area data check"},
933916f3 338 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 339 "9001: IOA reserved area invalid data pattern"},
933916f3 340 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 341 "9002: IOA reserved area LRC error"},
5aa3a333
WB
342 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
343 "Hardware Error, IOA metadata access error"},
933916f3 344 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 345 "102E: Out of alternate sectors for disk storage"},
933916f3 346 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 347 "FFF4: Data transfer underlength error"},
933916f3 348 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 349 "FFF4: Data transfer overlength error"},
933916f3 350 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 351 "3400: Logical unit failure"},
933916f3 352 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 353 "FFF4: Device microcode is corrupt"},
933916f3 354 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
355 "8150: PCI bus error"},
356 {0x04430000, 1, 0,
357 "Unsupported device bus message received"},
933916f3 358 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 359 "FFF4: Disk device problem"},
933916f3 360 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 361 "8150: Permanent IOA failure"},
933916f3 362 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 363 "3010: Disk device returned wrong response to IOA"},
933916f3 364 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
365 "8151: IOA microcode error"},
366 {0x04448500, 0, 0,
367 "Device bus status error"},
933916f3 368 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 369 "8157: IOA error requiring IOA reset to recover"},
35a39691
BK
370 {0x04448700, 0, 0,
371 "ATA device status error"},
1da177e4
LT
372 {0x04490000, 0, 0,
373 "Message reject received from the device"},
933916f3 374 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 375 "8008: A permanent cache battery pack failure occurred"},
933916f3 376 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 377 "9090: Disk unit has been modified after the last known status"},
933916f3 378 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 379 "9081: IOA detected device error"},
933916f3 380 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 381 "9082: IOA detected device error"},
933916f3 382 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 383 "3110: Device bus error, message or command phase"},
933916f3 384 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
35a39691 385 "3110: SAS Command / Task Management Function failed"},
933916f3 386 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 387 "9091: Incorrect hardware configuration change has been detected"},
933916f3 388 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 389 "9073: Invalid multi-adapter configuration"},
933916f3 390 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 391 "4010: Incorrect connection between cascaded expanders"},
933916f3 392 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 393 "4020: Connections exceed IOA design limits"},
933916f3 394 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 395 "4030: Incorrect multipath connection"},
933916f3 396 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 397 "4110: Unsupported enclosure function"},
ed7bd661 398 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
399 "4120: SAS cable VPD cannot be read"},
933916f3 400 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
401 "FFF4: Command to logical unit failed"},
402 {0x05240000, 1, 0,
403 "Illegal request, invalid request type or request packet"},
404 {0x05250000, 0, 0,
405 "Illegal request, invalid resource handle"},
b0df54bb
BK
406 {0x05258000, 0, 0,
407 "Illegal request, commands not allowed to this device"},
408 {0x05258100, 0, 0,
409 "Illegal request, command not allowed to a secondary adapter"},
5aa3a333
WB
410 {0x05258200, 0, 0,
411 "Illegal request, command not allowed to a non-optimized resource"},
1da177e4
LT
412 {0x05260000, 0, 0,
413 "Illegal request, invalid field in parameter list"},
414 {0x05260100, 0, 0,
415 "Illegal request, parameter not supported"},
416 {0x05260200, 0, 0,
417 "Illegal request, parameter value invalid"},
418 {0x052C0000, 0, 0,
419 "Illegal request, command sequence error"},
b0df54bb
BK
420 {0x052C8000, 1, 0,
421 "Illegal request, dual adapter support not enabled"},
ed7bd661 422 {0x052C8100, 1, 0,
423 "Illegal request, another cable connector was physically disabled"},
424 {0x054E8000, 1, 0,
425 "Illegal request, inconsistent group id/group count"},
933916f3 426 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 427 "9031: Array protection temporarily suspended, protection resuming"},
933916f3 428 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 429 "9040: Array protection temporarily suspended, protection resuming"},
ed7bd661 430 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "4080: IOA exceeded maximum operating temperature"},
432 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
433 "4085: Service required"},
933916f3 434 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 435 "3140: Device bus not ready to ready transition"},
933916f3 436 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
437 "FFFB: SCSI bus was reset"},
438 {0x06290500, 0, 0,
439 "FFFE: SCSI bus transition to single ended"},
440 {0x06290600, 0, 0,
441 "FFFE: SCSI bus transition to LVD"},
933916f3 442 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 443 "FFFB: SCSI bus was reset by another initiator"},
933916f3 444 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 445 "3029: A device replacement has occurred"},
ed7bd661 446 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4102: Device bus fabric performance degradation"},
933916f3 448 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 449 "9051: IOA cache data exists for a missing or failed device"},
933916f3 450 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 451 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
933916f3 452 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 453 "9025: Disk unit is not supported at its physical location"},
933916f3 454 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 455 "3020: IOA detected a SCSI bus configuration error"},
933916f3 456 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 457 "3150: SCSI bus configuration error"},
933916f3 458 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 459 "9074: Asymmetric advanced function disk configuration"},
933916f3 460 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 461 "4040: Incomplete multipath connection between IOA and enclosure"},
933916f3 462 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 463 "4041: Incomplete multipath connection between enclosure and device"},
933916f3 464 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 465 "9075: Incomplete multipath connection between IOA and remote IOA"},
933916f3 466 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 467 "9076: Configuration error, missing remote IOA"},
933916f3 468 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 469 "4050: Enclosure does not support a required multipath function"},
ed7bd661 470 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
471 "4121: Configuration error, required cable is missing"},
472 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
473 "4122: Cable is not plugged into the correct location on remote IOA"},
474 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
475 "4123: Configuration error, invalid cable vital product data"},
476 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
477 "4124: Configuration error, both cable ends are plugged into the same IOA"},
b75424fc
WB
478 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
479 "4070: Logically bad block written on device"},
933916f3 480 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 481 "9041: Array protection temporarily suspended"},
933916f3 482 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 483 "9042: Corrupt array parity detected on specified device"},
933916f3 484 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 485 "9030: Array no longer protected due to missing or failed disk unit"},
933916f3 486 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 487 "9071: Link operational transition"},
933916f3 488 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 489 "9072: Link not operational transition"},
933916f3 490 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 491 "9032: Array exposed but still protected"},
e435340c
BK
492 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
493 "70DD: Device forced failed by disrupt device command"},
933916f3 494 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 495 "4061: Multipath redundancy level got better"},
933916f3 496 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 497 "4060: Multipath redundancy level got worse"},
1da177e4
LT
498 {0x07270000, 0, 0,
499 "Failure due to other device"},
933916f3 500 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 501 "9008: IOA does not support functions expected by devices"},
933916f3 502 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 503 "9010: Cache data associated with attached devices cannot be found"},
933916f3 504 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 505 "9011: Cache data belongs to devices other than those attached"},
933916f3 506 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 507 "9020: Array missing 2 or more devices with only 1 device present"},
933916f3 508 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 509 "9021: Array missing 2 or more devices with 2 or more devices present"},
933916f3 510 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 511 "9022: Exposed array is missing a required device"},
933916f3 512 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 513 "9023: Array member(s) not at required physical locations"},
933916f3 514 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 515 "9024: Array not functional due to present hardware configuration"},
933916f3 516 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 517 "9026: Array not functional due to present hardware configuration"},
933916f3 518 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 519 "9027: Array is missing a device and parity is out of sync"},
933916f3 520 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 521 "9028: Maximum number of arrays already exist"},
933916f3 522 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 523 "9050: Required cache data cannot be located for a disk unit"},
933916f3 524 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 525 "9052: Cache data exists for a device that has been modified"},
933916f3 526 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 527 "9054: IOA resources not available due to previous problems"},
933916f3 528 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 529 "9092: Disk unit requires initialization before use"},
933916f3 530 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 531 "9029: Incorrect hardware configuration change has been detected"},
933916f3 532 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 533 "9060: One or more disk pairs are missing from an array"},
933916f3 534 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 535 "9061: One or more disks are missing from an array"},
933916f3 536 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 537 "9062: One or more disks are missing from an array"},
933916f3 538 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 539 "9063: Maximum number of functional arrays has been exceeded"},
ed7bd661 540 {0x07279A00, 0, 0,
541 "Data protect, other volume set problem"},
1da177e4
LT
542 {0x0B260000, 0, 0,
543 "Aborted command, invalid descriptor"},
ed7bd661 544 {0x0B3F9000, 0, 0,
545 "Target operating conditions have changed, dual adapter takeover"},
546 {0x0B530200, 0, 0,
547 "Aborted command, medium removal prevented"},
1da177e4 548 {0x0B5A0000, 0, 0,
ed7bd661 549 "Command terminated by host"},
550 {0x0B5B8000, 0, 0,
551 "Aborted command, command terminated by host"}
1da177e4
LT
552};
553
554static const struct ipr_ses_table_entry ipr_ses_table[] = {
555 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
556 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
557 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
558 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
559 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
560 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
561 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
562 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
563 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
564 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
565 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
566 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
568};
569
570/*
571 * Function Prototypes
572 */
573static int ipr_reset_alert(struct ipr_cmnd *);
574static void ipr_process_ccn(struct ipr_cmnd *);
575static void ipr_process_error(struct ipr_cmnd *);
576static void ipr_reset_ioa_job(struct ipr_cmnd *);
577static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
578 enum ipr_shutdown_type);
579
580#ifdef CONFIG_SCSI_IPR_TRACE
581/**
582 * ipr_trc_hook - Add a trace entry to the driver trace
583 * @ipr_cmd: ipr command struct
584 * @type: trace type
585 * @add_data: additional data
586 *
587 * Return value:
588 * none
589 **/
590static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
591 u8 type, u32 add_data)
592{
593 struct ipr_trace_entry *trace_entry;
594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
595
56d6aa33 596 trace_entry = &ioa_cfg->trace[atomic_add_return
597 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
1da177e4
LT
598 trace_entry->time = jiffies;
599 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
600 trace_entry->type = type;
a32c055f
WB
601 if (ipr_cmd->ioa_cfg->sis64)
602 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
603 else
604 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
35a39691 605 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
1da177e4
LT
606 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
607 trace_entry->u.add_data = add_data;
56d6aa33 608 wmb();
1da177e4
LT
609}
610#else
203fa3fe 611#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
1da177e4
LT
612#endif
613
172cd6e1
BK
614/**
615 * ipr_lock_and_done - Acquire lock and complete command
616 * @ipr_cmd: ipr command struct
617 *
618 * Return value:
619 * none
620 **/
621static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
622{
623 unsigned long lock_flags;
624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
625
626 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
627 ipr_cmd->done(ipr_cmd);
628 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
629}
630
1da177e4
LT
631/**
632 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
633 * @ipr_cmd: ipr command struct
634 *
635 * Return value:
636 * none
637 **/
638static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
639{
640 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00
WB
641 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
642 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
a32c055f 643 dma_addr_t dma_addr = ipr_cmd->dma_addr;
05a6538a 644 int hrrq_id;
1da177e4 645
05a6538a 646 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
1da177e4 647 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
05a6538a 648 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
a32c055f 649 ioarcb->data_transfer_length = 0;
1da177e4 650 ioarcb->read_data_transfer_length = 0;
a32c055f 651 ioarcb->ioadl_len = 0;
1da177e4 652 ioarcb->read_ioadl_len = 0;
a32c055f 653
96d21f00 654 if (ipr_cmd->ioa_cfg->sis64) {
a32c055f
WB
655 ioarcb->u.sis64_addr_data.data_ioadl_addr =
656 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
96d21f00
WB
657 ioasa64->u.gata.status = 0;
658 } else {
a32c055f
WB
659 ioarcb->write_ioadl_addr =
660 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
661 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
96d21f00 662 ioasa->u.gata.status = 0;
a32c055f
WB
663 }
664
96d21f00
WB
665 ioasa->hdr.ioasc = 0;
666 ioasa->hdr.residual_data_len = 0;
1da177e4 667 ipr_cmd->scsi_cmd = NULL;
35a39691 668 ipr_cmd->qc = NULL;
1da177e4
LT
669 ipr_cmd->sense_buffer[0] = 0;
670 ipr_cmd->dma_use_sg = 0;
671}
672
673/**
674 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
675 * @ipr_cmd: ipr command struct
676 *
677 * Return value:
678 * none
679 **/
172cd6e1
BK
680static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
681 void (*fast_done) (struct ipr_cmnd *))
1da177e4
LT
682{
683 ipr_reinit_ipr_cmnd(ipr_cmd);
684 ipr_cmd->u.scratch = 0;
685 ipr_cmd->sibling = NULL;
172cd6e1 686 ipr_cmd->fast_done = fast_done;
1da177e4
LT
687 init_timer(&ipr_cmd->timer);
688}
689
690/**
00bfef2c 691 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
1da177e4
LT
692 * @ioa_cfg: ioa config struct
693 *
694 * Return value:
695 * pointer to ipr command struct
696 **/
697static
05a6538a 698struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
1da177e4 699{
05a6538a 700 struct ipr_cmnd *ipr_cmd = NULL;
701
702 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
703 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
704 struct ipr_cmnd, queue);
705 list_del(&ipr_cmd->queue);
706 }
1da177e4 707
1da177e4
LT
708
709 return ipr_cmd;
710}
711
00bfef2c
BK
712/**
713 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
714 * @ioa_cfg: ioa config struct
715 *
716 * Return value:
717 * pointer to ipr command struct
718 **/
719static
720struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
721{
05a6538a 722 struct ipr_cmnd *ipr_cmd =
723 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
172cd6e1 724 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
00bfef2c
BK
725 return ipr_cmd;
726}
727
1da177e4
LT
728/**
729 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
730 * @ioa_cfg: ioa config struct
731 * @clr_ints: interrupts to clear
732 *
733 * This function masks all interrupts on the adapter, then clears the
734 * interrupts specified in the mask
735 *
736 * Return value:
737 * none
738 **/
739static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
740 u32 clr_ints)
741{
742 volatile u32 int_reg;
56d6aa33 743 int i;
1da177e4
LT
744
745 /* Stop new interrupts */
56d6aa33 746 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
747 spin_lock(&ioa_cfg->hrrq[i]._lock);
748 ioa_cfg->hrrq[i].allow_interrupts = 0;
749 spin_unlock(&ioa_cfg->hrrq[i]._lock);
750 }
751 wmb();
1da177e4
LT
752
753 /* Set interrupt mask to stop all new interrupts */
214777ba
WB
754 if (ioa_cfg->sis64)
755 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
756 else
757 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
1da177e4
LT
758
759 /* Clear any pending interrupts */
214777ba
WB
760 if (ioa_cfg->sis64)
761 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
762 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
1da177e4
LT
763 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
764}
765
766/**
767 * ipr_save_pcix_cmd_reg - Save PCI-X command register
768 * @ioa_cfg: ioa config struct
769 *
770 * Return value:
771 * 0 on success / -EIO on failure
772 **/
773static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
774{
775 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
776
7dce0e1c
BK
777 if (pcix_cmd_reg == 0)
778 return 0;
1da177e4
LT
779
780 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
781 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
782 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
783 return -EIO;
784 }
785
786 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
787 return 0;
788}
789
790/**
791 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792 * @ioa_cfg: ioa config struct
793 *
794 * Return value:
795 * 0 on success / -EIO on failure
796 **/
797static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
798{
799 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
800
801 if (pcix_cmd_reg) {
802 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
803 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
804 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
805 return -EIO;
806 }
1da177e4
LT
807 }
808
809 return 0;
810}
811
35a39691
BK
812/**
813 * ipr_sata_eh_done - done function for aborted SATA commands
814 * @ipr_cmd: ipr command struct
815 *
816 * This function is invoked for ops generated to SATA
817 * devices which are being aborted.
818 *
819 * Return value:
820 * none
821 **/
822static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
823{
35a39691
BK
824 struct ata_queued_cmd *qc = ipr_cmd->qc;
825 struct ipr_sata_port *sata_port = qc->ap->private_data;
826
827 qc->err_mask |= AC_ERR_OTHER;
828 sata_port->ioasa.status |= ATA_BUSY;
05a6538a 829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
35a39691
BK
830 ata_qc_complete(qc);
831}
832
1da177e4
LT
833/**
834 * ipr_scsi_eh_done - mid-layer done function for aborted ops
835 * @ipr_cmd: ipr command struct
836 *
837 * This function is invoked by the interrupt handler for
838 * ops generated by the SCSI mid-layer which are being aborted.
839 *
840 * Return value:
841 * none
842 **/
843static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
844{
1da177e4
LT
845 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
846
847 scsi_cmd->result |= (DID_ERROR << 16);
848
63015bc9 849 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4 850 scsi_cmd->scsi_done(scsi_cmd);
05a6538a 851 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
852}
853
854/**
855 * ipr_fail_all_ops - Fails all outstanding ops.
856 * @ioa_cfg: ioa config struct
857 *
858 * This function fails all outstanding ops.
859 *
860 * Return value:
861 * none
862 **/
863static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
864{
865 struct ipr_cmnd *ipr_cmd, *temp;
05a6538a 866 struct ipr_hrr_queue *hrrq;
1da177e4
LT
867
868 ENTER;
05a6538a 869 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 870 spin_lock(&hrrq->_lock);
05a6538a 871 list_for_each_entry_safe(ipr_cmd,
872 temp, &hrrq->hrrq_pending_q, queue) {
873 list_del(&ipr_cmd->queue);
1da177e4 874
05a6538a 875 ipr_cmd->s.ioasa.hdr.ioasc =
876 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
877 ipr_cmd->s.ioasa.hdr.ilid =
878 cpu_to_be32(IPR_DRIVER_ILID);
1da177e4 879
05a6538a 880 if (ipr_cmd->scsi_cmd)
881 ipr_cmd->done = ipr_scsi_eh_done;
882 else if (ipr_cmd->qc)
883 ipr_cmd->done = ipr_sata_eh_done;
1da177e4 884
05a6538a 885 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
886 IPR_IOASC_IOA_WAS_RESET);
887 del_timer(&ipr_cmd->timer);
888 ipr_cmd->done(ipr_cmd);
889 }
56d6aa33 890 spin_unlock(&hrrq->_lock);
1da177e4 891 }
1da177e4
LT
892 LEAVE;
893}
894
a32c055f
WB
895/**
896 * ipr_send_command - Send driver initiated requests.
897 * @ipr_cmd: ipr command struct
898 *
899 * This function sends a command to the adapter using the correct write call.
900 * In the case of sis64, calculate the ioarcb size required. Then or in the
901 * appropriate bits.
902 *
903 * Return value:
904 * none
905 **/
906static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
907{
908 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
909 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
910
911 if (ioa_cfg->sis64) {
912 /* The default size is 256 bytes */
913 send_dma_addr |= 0x1;
914
915 /* If the number of ioadls * size of ioadl > 128 bytes,
916 then use a 512 byte ioarcb */
917 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
918 send_dma_addr |= 0x4;
919 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
920 } else
921 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
922}
923
1da177e4
LT
924/**
925 * ipr_do_req - Send driver initiated requests.
926 * @ipr_cmd: ipr command struct
927 * @done: done function
928 * @timeout_func: timeout function
929 * @timeout: timeout value
930 *
931 * This function sends the specified command to the adapter with the
932 * timeout given. The done function is invoked on command completion.
933 *
934 * Return value:
935 * none
936 **/
937static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
938 void (*done) (struct ipr_cmnd *),
939 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
940{
05a6538a 941 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
942
943 ipr_cmd->done = done;
944
945 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
946 ipr_cmd->timer.expires = jiffies + timeout;
947 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
948
949 add_timer(&ipr_cmd->timer);
950
951 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
952
a32c055f 953 ipr_send_command(ipr_cmd);
1da177e4
LT
954}
955
956/**
957 * ipr_internal_cmd_done - Op done function for an internally generated op.
958 * @ipr_cmd: ipr command struct
959 *
960 * This function is the op done function for an internally generated,
961 * blocking op. It simply wakes the sleeping thread.
962 *
963 * Return value:
964 * none
965 **/
966static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
967{
968 if (ipr_cmd->sibling)
969 ipr_cmd->sibling = NULL;
970 else
971 complete(&ipr_cmd->completion);
972}
973
a32c055f
WB
974/**
975 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
976 * @ipr_cmd: ipr command struct
977 * @dma_addr: dma address
978 * @len: transfer length
979 * @flags: ioadl flag value
980 *
981 * This function initializes an ioadl in the case where there is only a single
982 * descriptor.
983 *
984 * Return value:
985 * nothing
986 **/
987static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
988 u32 len, int flags)
989{
990 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
991 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
992
993 ipr_cmd->dma_use_sg = 1;
994
995 if (ipr_cmd->ioa_cfg->sis64) {
996 ioadl64->flags = cpu_to_be32(flags);
997 ioadl64->data_len = cpu_to_be32(len);
998 ioadl64->address = cpu_to_be64(dma_addr);
999
1000 ipr_cmd->ioarcb.ioadl_len =
1001 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1002 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1003 } else {
1004 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1005 ioadl->address = cpu_to_be32(dma_addr);
1006
1007 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1008 ipr_cmd->ioarcb.read_ioadl_len =
1009 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1010 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1011 } else {
1012 ipr_cmd->ioarcb.ioadl_len =
1013 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1014 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1015 }
1016 }
1017}
1018
1da177e4
LT
1019/**
1020 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1021 * @ipr_cmd: ipr command struct
1022 * @timeout_func: function to invoke if command times out
1023 * @timeout: timeout
1024 *
1025 * Return value:
1026 * none
1027 **/
1028static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1029 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1030 u32 timeout)
1031{
1032 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1033
1034 init_completion(&ipr_cmd->completion);
1035 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1036
1037 spin_unlock_irq(ioa_cfg->host->host_lock);
1038 wait_for_completion(&ipr_cmd->completion);
1039 spin_lock_irq(ioa_cfg->host->host_lock);
1040}
1041
05a6538a 1042static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1043{
1044 if (ioa_cfg->hrrq_num == 1)
56d6aa33 1045 return 0;
1046 else
1047 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
05a6538a 1048}
1049
1da177e4
LT
1050/**
1051 * ipr_send_hcam - Send an HCAM to the adapter.
1052 * @ioa_cfg: ioa config struct
1053 * @type: HCAM type
1054 * @hostrcb: hostrcb struct
1055 *
1056 * This function will send a Host Controlled Async command to the adapter.
1057 * If HCAMs are currently not allowed to be issued to the adapter, it will
1058 * place the hostrcb on the free queue.
1059 *
1060 * Return value:
1061 * none
1062 **/
1063static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1064 struct ipr_hostrcb *hostrcb)
1065{
1066 struct ipr_cmnd *ipr_cmd;
1067 struct ipr_ioarcb *ioarcb;
1068
56d6aa33 1069 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1da177e4 1070 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
05a6538a 1071 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
1072 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1073
1074 ipr_cmd->u.hostrcb = hostrcb;
1075 ioarcb = &ipr_cmd->ioarcb;
1076
1077 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1078 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1079 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1080 ioarcb->cmd_pkt.cdb[1] = type;
1081 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1082 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1083
a32c055f
WB
1084 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1085 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
1086
1087 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1088 ipr_cmd->done = ipr_process_ccn;
1089 else
1090 ipr_cmd->done = ipr_process_error;
1091
1092 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1093
a32c055f 1094 ipr_send_command(ipr_cmd);
1da177e4
LT
1095 } else {
1096 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1097 }
1098}
1099
3e7ebdfa
WB
1100/**
1101 * ipr_update_ata_class - Update the ata class in the resource entry
1102 * @res: resource entry struct
1103 * @proto: cfgte device bus protocol value
1104 *
1105 * Return value:
1106 * none
1107 **/
1108static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1109{
203fa3fe 1110 switch (proto) {
3e7ebdfa
WB
1111 case IPR_PROTO_SATA:
1112 case IPR_PROTO_SAS_STP:
1113 res->ata_class = ATA_DEV_ATA;
1114 break;
1115 case IPR_PROTO_SATA_ATAPI:
1116 case IPR_PROTO_SAS_STP_ATAPI:
1117 res->ata_class = ATA_DEV_ATAPI;
1118 break;
1119 default:
1120 res->ata_class = ATA_DEV_UNKNOWN;
1121 break;
1122 };
1123}
1124
1da177e4
LT
1125/**
1126 * ipr_init_res_entry - Initialize a resource entry struct.
1127 * @res: resource entry struct
3e7ebdfa 1128 * @cfgtew: config table entry wrapper struct
1da177e4
LT
1129 *
1130 * Return value:
1131 * none
1132 **/
3e7ebdfa
WB
1133static void ipr_init_res_entry(struct ipr_resource_entry *res,
1134 struct ipr_config_table_entry_wrapper *cfgtew)
1da177e4 1135{
3e7ebdfa
WB
1136 int found = 0;
1137 unsigned int proto;
1138 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1139 struct ipr_resource_entry *gscsi_res = NULL;
1140
ee0a90fa 1141 res->needs_sync_complete = 0;
1da177e4
LT
1142 res->in_erp = 0;
1143 res->add_to_ml = 0;
1144 res->del_from_ml = 0;
1145 res->resetting_device = 0;
0b1f8d44 1146 res->reset_occurred = 0;
1da177e4 1147 res->sdev = NULL;
35a39691 1148 res->sata_port = NULL;
3e7ebdfa
WB
1149
1150 if (ioa_cfg->sis64) {
1151 proto = cfgtew->u.cfgte64->proto;
1152 res->res_flags = cfgtew->u.cfgte64->res_flags;
1153 res->qmodel = IPR_QUEUEING_MODEL64(res);
438b0331 1154 res->type = cfgtew->u.cfgte64->res_type;
3e7ebdfa
WB
1155
1156 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1157 sizeof(res->res_path));
1158
1159 res->bus = 0;
0cb992ed
WB
1160 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1161 sizeof(res->dev_lun.scsi_lun));
3e7ebdfa
WB
1162 res->lun = scsilun_to_int(&res->dev_lun);
1163
1164 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1165 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1166 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1167 found = 1;
1168 res->target = gscsi_res->target;
1169 break;
1170 }
1171 }
1172 if (!found) {
1173 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1174 ioa_cfg->max_devs_supported);
1175 set_bit(res->target, ioa_cfg->target_ids);
1176 }
3e7ebdfa
WB
1177 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1178 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1179 res->target = 0;
1180 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1181 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1182 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1183 ioa_cfg->max_devs_supported);
1184 set_bit(res->target, ioa_cfg->array_ids);
1185 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1186 res->bus = IPR_VSET_VIRTUAL_BUS;
1187 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1188 ioa_cfg->max_devs_supported);
1189 set_bit(res->target, ioa_cfg->vset_ids);
1190 } else {
1191 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1192 ioa_cfg->max_devs_supported);
1193 set_bit(res->target, ioa_cfg->target_ids);
1194 }
1195 } else {
1196 proto = cfgtew->u.cfgte->proto;
1197 res->qmodel = IPR_QUEUEING_MODEL(res);
1198 res->flags = cfgtew->u.cfgte->flags;
1199 if (res->flags & IPR_IS_IOA_RESOURCE)
1200 res->type = IPR_RES_TYPE_IOAFP;
1201 else
1202 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1203
1204 res->bus = cfgtew->u.cfgte->res_addr.bus;
1205 res->target = cfgtew->u.cfgte->res_addr.target;
1206 res->lun = cfgtew->u.cfgte->res_addr.lun;
46d74563 1207 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
3e7ebdfa
WB
1208 }
1209
1210 ipr_update_ata_class(res, proto);
1211}
1212
1213/**
1214 * ipr_is_same_device - Determine if two devices are the same.
1215 * @res: resource entry struct
1216 * @cfgtew: config table entry wrapper struct
1217 *
1218 * Return value:
1219 * 1 if the devices are the same / 0 otherwise
1220 **/
1221static int ipr_is_same_device(struct ipr_resource_entry *res,
1222 struct ipr_config_table_entry_wrapper *cfgtew)
1223{
1224 if (res->ioa_cfg->sis64) {
1225 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1226 sizeof(cfgtew->u.cfgte64->dev_id)) &&
0cb992ed 1227 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
3e7ebdfa
WB
1228 sizeof(cfgtew->u.cfgte64->lun))) {
1229 return 1;
1230 }
1231 } else {
1232 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1233 res->target == cfgtew->u.cfgte->res_addr.target &&
1234 res->lun == cfgtew->u.cfgte->res_addr.lun)
1235 return 1;
1236 }
1237
1238 return 0;
1239}
1240
1241/**
b3b3b407 1242 * __ipr_format_res_path - Format the resource path for printing.
3e7ebdfa
WB
1243 * @res_path: resource path
1244 * @buf: buffer
b3b3b407 1245 * @len: length of buffer provided
3e7ebdfa
WB
1246 *
1247 * Return value:
1248 * pointer to buffer
1249 **/
b3b3b407 1250static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
3e7ebdfa
WB
1251{
1252 int i;
5adcbeb3 1253 char *p = buffer;
3e7ebdfa 1254
46d74563 1255 *p = '\0';
5adcbeb3
WB
1256 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1257 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1258 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
3e7ebdfa
WB
1259
1260 return buffer;
1261}
1262
b3b3b407
BK
1263/**
1264 * ipr_format_res_path - Format the resource path for printing.
1265 * @ioa_cfg: ioa config struct
1266 * @res_path: resource path
1267 * @buf: buffer
1268 * @len: length of buffer provided
1269 *
1270 * Return value:
1271 * pointer to buffer
1272 **/
1273static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1274 u8 *res_path, char *buffer, int len)
1275{
1276 char *p = buffer;
1277
1278 *p = '\0';
1279 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1280 __ipr_format_res_path(res_path, p, len - (buffer - p));
1281 return buffer;
1282}
1283
3e7ebdfa
WB
1284/**
1285 * ipr_update_res_entry - Update the resource entry.
1286 * @res: resource entry struct
1287 * @cfgtew: config table entry wrapper struct
1288 *
1289 * Return value:
1290 * none
1291 **/
1292static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293 struct ipr_config_table_entry_wrapper *cfgtew)
1294{
1295 char buffer[IPR_MAX_RES_PATH_LENGTH];
1296 unsigned int proto;
1297 int new_path = 0;
1298
1299 if (res->ioa_cfg->sis64) {
1300 res->flags = cfgtew->u.cfgte64->flags;
1301 res->res_flags = cfgtew->u.cfgte64->res_flags;
75576bb9 1302 res->type = cfgtew->u.cfgte64->res_type;
3e7ebdfa
WB
1303
1304 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305 sizeof(struct ipr_std_inq_data));
1306
1307 res->qmodel = IPR_QUEUEING_MODEL64(res);
1308 proto = cfgtew->u.cfgte64->proto;
1309 res->res_handle = cfgtew->u.cfgte64->res_handle;
1310 res->dev_id = cfgtew->u.cfgte64->dev_id;
1311
1312 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313 sizeof(res->dev_lun.scsi_lun));
1314
1315 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316 sizeof(res->res_path))) {
1317 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318 sizeof(res->res_path));
1319 new_path = 1;
1320 }
1321
1322 if (res->sdev && new_path)
1323 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
b3b3b407
BK
1324 ipr_format_res_path(res->ioa_cfg,
1325 res->res_path, buffer, sizeof(buffer)));
3e7ebdfa
WB
1326 } else {
1327 res->flags = cfgtew->u.cfgte->flags;
1328 if (res->flags & IPR_IS_IOA_RESOURCE)
1329 res->type = IPR_RES_TYPE_IOAFP;
1330 else
1331 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1332
1333 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334 sizeof(struct ipr_std_inq_data));
1335
1336 res->qmodel = IPR_QUEUEING_MODEL(res);
1337 proto = cfgtew->u.cfgte->proto;
1338 res->res_handle = cfgtew->u.cfgte->res_handle;
1339 }
1340
1341 ipr_update_ata_class(res, proto);
1342}
1343
1344/**
1345 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346 * for the resource.
1347 * @res: resource entry struct
1348 * @cfgtew: config table entry wrapper struct
1349 *
1350 * Return value:
1351 * none
1352 **/
1353static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354{
1355 struct ipr_resource_entry *gscsi_res = NULL;
1356 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358 if (!ioa_cfg->sis64)
1359 return;
1360
1361 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362 clear_bit(res->target, ioa_cfg->array_ids);
1363 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364 clear_bit(res->target, ioa_cfg->vset_ids);
1365 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368 return;
1369 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371 } else if (res->bus == 0)
1372 clear_bit(res->target, ioa_cfg->target_ids);
1da177e4
LT
1373}
1374
1375/**
1376 * ipr_handle_config_change - Handle a config change from the adapter
1377 * @ioa_cfg: ioa config struct
1378 * @hostrcb: hostrcb
1379 *
1380 * Return value:
1381 * none
1382 **/
1383static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1384 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1385{
1386 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1387 struct ipr_config_table_entry_wrapper cfgtew;
1388 __be32 cc_res_handle;
1389
1da177e4
LT
1390 u32 is_ndn = 1;
1391
3e7ebdfa
WB
1392 if (ioa_cfg->sis64) {
1393 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395 } else {
1396 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398 }
1da177e4
LT
1399
1400 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1401 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1402 is_ndn = 0;
1403 break;
1404 }
1405 }
1406
1407 if (is_ndn) {
1408 if (list_empty(&ioa_cfg->free_res_q)) {
1409 ipr_send_hcam(ioa_cfg,
1410 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411 hostrcb);
1412 return;
1413 }
1414
1415 res = list_entry(ioa_cfg->free_res_q.next,
1416 struct ipr_resource_entry, queue);
1417
1418 list_del(&res->queue);
3e7ebdfa 1419 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1420 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421 }
1422
3e7ebdfa 1423 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1424
1425 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426 if (res->sdev) {
1da177e4 1427 res->del_from_ml = 1;
3e7ebdfa 1428 res->res_handle = IPR_INVALID_RES_HANDLE;
f688f96d 1429 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1430 } else {
1431 ipr_clear_res_target(res);
1da177e4 1432 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1433 }
5767a1c4 1434 } else if (!res->sdev || res->del_from_ml) {
1da177e4 1435 res->add_to_ml = 1;
f688f96d 1436 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
1437 }
1438
1439 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1440}
1441
1442/**
1443 * ipr_process_ccn - Op done function for a CCN.
1444 * @ipr_cmd: ipr command struct
1445 *
1446 * This function is the op done function for a configuration
1447 * change notification host controlled async from the adapter.
1448 *
1449 * Return value:
1450 * none
1451 **/
1452static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1453{
1454 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1455 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1456 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
1457
1458 list_del(&hostrcb->queue);
05a6538a 1459 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
1460
1461 if (ioasc) {
1462 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1463 dev_err(&ioa_cfg->pdev->dev,
1464 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1465
1466 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1467 } else {
1468 ipr_handle_config_change(ioa_cfg, hostrcb);
1469 }
1470}
1471
8cf093e2
BK
1472/**
1473 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1474 * @i: index into buffer
1475 * @buf: string to modify
1476 *
1477 * This function will strip all trailing whitespace, pad the end
1478 * of the string with a single space, and NULL terminate the string.
1479 *
1480 * Return value:
1481 * new length of string
1482 **/
1483static int strip_and_pad_whitespace(int i, char *buf)
1484{
1485 while (i && buf[i] == ' ')
1486 i--;
1487 buf[i+1] = ' ';
1488 buf[i+2] = '\0';
1489 return i + 2;
1490}
1491
1492/**
1493 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1494 * @prefix: string to print at start of printk
1495 * @hostrcb: hostrcb pointer
1496 * @vpd: vendor/product id/sn struct
1497 *
1498 * Return value:
1499 * none
1500 **/
1501static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1502 struct ipr_vpd *vpd)
1503{
1504 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1505 int i = 0;
1506
1507 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1508 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1509
1510 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1511 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1512
1513 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1514 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1515
1516 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1517}
1518
1da177e4
LT
1519/**
1520 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1521 * @vpd: vendor/product id/sn struct
1da177e4
LT
1522 *
1523 * Return value:
1524 * none
1525 **/
cfc32139 1526static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1527{
1528 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1529 + IPR_SERIAL_NUM_LEN];
1530
cfc32139
BK
1531 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1532 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1533 IPR_PROD_ID_LEN);
1534 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1535 ipr_err("Vendor/Product ID: %s\n", buffer);
1536
cfc32139 1537 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1538 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1539 ipr_err(" Serial Number: %s\n", buffer);
1540}
1541
8cf093e2
BK
1542/**
1543 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1544 * @prefix: string to print at start of printk
1545 * @hostrcb: hostrcb pointer
1546 * @vpd: vendor/product id/sn/wwn struct
1547 *
1548 * Return value:
1549 * none
1550 **/
1551static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1552 struct ipr_ext_vpd *vpd)
1553{
1554 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1555 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1556 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1557}
1558
ee0f05b8
BK
1559/**
1560 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1561 * @vpd: vendor/product id/sn/wwn struct
1562 *
1563 * Return value:
1564 * none
1565 **/
1566static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1567{
1568 ipr_log_vpd(&vpd->vpd);
1569 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1570 be32_to_cpu(vpd->wwid[1]));
1571}
1572
1573/**
1574 * ipr_log_enhanced_cache_error - Log a cache error.
1575 * @ioa_cfg: ioa config struct
1576 * @hostrcb: hostrcb struct
1577 *
1578 * Return value:
1579 * none
1580 **/
1581static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1582 struct ipr_hostrcb *hostrcb)
1583{
4565e370
WB
1584 struct ipr_hostrcb_type_12_error *error;
1585
1586 if (ioa_cfg->sis64)
1587 error = &hostrcb->hcam.u.error64.u.type_12_error;
1588 else
1589 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8
BK
1590
1591 ipr_err("-----Current Configuration-----\n");
1592 ipr_err("Cache Directory Card Information:\n");
1593 ipr_log_ext_vpd(&error->ioa_vpd);
1594 ipr_err("Adapter Card Information:\n");
1595 ipr_log_ext_vpd(&error->cfc_vpd);
1596
1597 ipr_err("-----Expected Configuration-----\n");
1598 ipr_err("Cache Directory Card Information:\n");
1599 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1600 ipr_err("Adapter Card Information:\n");
1601 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1602
1603 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1604 be32_to_cpu(error->ioa_data[0]),
1605 be32_to_cpu(error->ioa_data[1]),
1606 be32_to_cpu(error->ioa_data[2]));
1607}
1608
1da177e4
LT
1609/**
1610 * ipr_log_cache_error - Log a cache error.
1611 * @ioa_cfg: ioa config struct
1612 * @hostrcb: hostrcb struct
1613 *
1614 * Return value:
1615 * none
1616 **/
1617static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1618 struct ipr_hostrcb *hostrcb)
1619{
1620 struct ipr_hostrcb_type_02_error *error =
1621 &hostrcb->hcam.u.error.u.type_02_error;
1622
1623 ipr_err("-----Current Configuration-----\n");
1624 ipr_err("Cache Directory Card Information:\n");
cfc32139 1625 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1626 ipr_err("Adapter Card Information:\n");
cfc32139 1627 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1628
1629 ipr_err("-----Expected Configuration-----\n");
1630 ipr_err("Cache Directory Card Information:\n");
cfc32139 1631 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1632 ipr_err("Adapter Card Information:\n");
cfc32139 1633 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1634
1635 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1636 be32_to_cpu(error->ioa_data[0]),
1637 be32_to_cpu(error->ioa_data[1]),
1638 be32_to_cpu(error->ioa_data[2]));
1639}
1640
ee0f05b8
BK
1641/**
1642 * ipr_log_enhanced_config_error - Log a configuration error.
1643 * @ioa_cfg: ioa config struct
1644 * @hostrcb: hostrcb struct
1645 *
1646 * Return value:
1647 * none
1648 **/
1649static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1650 struct ipr_hostrcb *hostrcb)
1651{
1652 int errors_logged, i;
1653 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1654 struct ipr_hostrcb_type_13_error *error;
1655
1656 error = &hostrcb->hcam.u.error.u.type_13_error;
1657 errors_logged = be32_to_cpu(error->errors_logged);
1658
1659 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1660 be32_to_cpu(error->errors_detected), errors_logged);
1661
1662 dev_entry = error->dev;
1663
1664 for (i = 0; i < errors_logged; i++, dev_entry++) {
1665 ipr_err_separator;
1666
1667 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1668 ipr_log_ext_vpd(&dev_entry->vpd);
1669
1670 ipr_err("-----New Device Information-----\n");
1671 ipr_log_ext_vpd(&dev_entry->new_vpd);
1672
1673 ipr_err("Cache Directory Card Information:\n");
1674 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1675
1676 ipr_err("Adapter Card Information:\n");
1677 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1678 }
1679}
1680
4565e370
WB
1681/**
1682 * ipr_log_sis64_config_error - Log a device error.
1683 * @ioa_cfg: ioa config struct
1684 * @hostrcb: hostrcb struct
1685 *
1686 * Return value:
1687 * none
1688 **/
1689static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1690 struct ipr_hostrcb *hostrcb)
1691{
1692 int errors_logged, i;
1693 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1694 struct ipr_hostrcb_type_23_error *error;
1695 char buffer[IPR_MAX_RES_PATH_LENGTH];
1696
1697 error = &hostrcb->hcam.u.error64.u.type_23_error;
1698 errors_logged = be32_to_cpu(error->errors_logged);
1699
1700 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1701 be32_to_cpu(error->errors_detected), errors_logged);
1702
1703 dev_entry = error->dev;
1704
1705 for (i = 0; i < errors_logged; i++, dev_entry++) {
1706 ipr_err_separator;
1707
1708 ipr_err("Device %d : %s", i + 1,
b3b3b407
BK
1709 __ipr_format_res_path(dev_entry->res_path,
1710 buffer, sizeof(buffer)));
4565e370
WB
1711 ipr_log_ext_vpd(&dev_entry->vpd);
1712
1713 ipr_err("-----New Device Information-----\n");
1714 ipr_log_ext_vpd(&dev_entry->new_vpd);
1715
1716 ipr_err("Cache Directory Card Information:\n");
1717 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1718
1719 ipr_err("Adapter Card Information:\n");
1720 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1721 }
1722}
1723
1da177e4
LT
1724/**
1725 * ipr_log_config_error - Log a configuration error.
1726 * @ioa_cfg: ioa config struct
1727 * @hostrcb: hostrcb struct
1728 *
1729 * Return value:
1730 * none
1731 **/
1732static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1733 struct ipr_hostrcb *hostrcb)
1734{
1735 int errors_logged, i;
1736 struct ipr_hostrcb_device_data_entry *dev_entry;
1737 struct ipr_hostrcb_type_03_error *error;
1738
1739 error = &hostrcb->hcam.u.error.u.type_03_error;
1740 errors_logged = be32_to_cpu(error->errors_logged);
1741
1742 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1743 be32_to_cpu(error->errors_detected), errors_logged);
1744
cfc32139 1745 dev_entry = error->dev;
1da177e4
LT
1746
1747 for (i = 0; i < errors_logged; i++, dev_entry++) {
1748 ipr_err_separator;
1749
fa15b1f6 1750 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1751 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1752
1753 ipr_err("-----New Device Information-----\n");
cfc32139 1754 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1755
1756 ipr_err("Cache Directory Card Information:\n");
cfc32139 1757 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1758
1759 ipr_err("Adapter Card Information:\n");
cfc32139 1760 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1761
1762 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1763 be32_to_cpu(dev_entry->ioa_data[0]),
1764 be32_to_cpu(dev_entry->ioa_data[1]),
1765 be32_to_cpu(dev_entry->ioa_data[2]),
1766 be32_to_cpu(dev_entry->ioa_data[3]),
1767 be32_to_cpu(dev_entry->ioa_data[4]));
1768 }
1769}
1770
ee0f05b8
BK
1771/**
1772 * ipr_log_enhanced_array_error - Log an array configuration error.
1773 * @ioa_cfg: ioa config struct
1774 * @hostrcb: hostrcb struct
1775 *
1776 * Return value:
1777 * none
1778 **/
1779static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1780 struct ipr_hostrcb *hostrcb)
1781{
1782 int i, num_entries;
1783 struct ipr_hostrcb_type_14_error *error;
1784 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1785 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1786
1787 error = &hostrcb->hcam.u.error.u.type_14_error;
1788
1789 ipr_err_separator;
1790
1791 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1792 error->protection_level,
1793 ioa_cfg->host->host_no,
1794 error->last_func_vset_res_addr.bus,
1795 error->last_func_vset_res_addr.target,
1796 error->last_func_vset_res_addr.lun);
1797
1798 ipr_err_separator;
1799
1800 array_entry = error->array_member;
1801 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1802 ARRAY_SIZE(error->array_member));
ee0f05b8
BK
1803
1804 for (i = 0; i < num_entries; i++, array_entry++) {
1805 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1806 continue;
1807
1808 if (be32_to_cpu(error->exposed_mode_adn) == i)
1809 ipr_err("Exposed Array Member %d:\n", i);
1810 else
1811 ipr_err("Array Member %d:\n", i);
1812
1813 ipr_log_ext_vpd(&array_entry->vpd);
1814 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1815 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1816 "Expected Location");
1817
1818 ipr_err_separator;
1819 }
1820}
1821
1da177e4
LT
1822/**
1823 * ipr_log_array_error - Log an array configuration error.
1824 * @ioa_cfg: ioa config struct
1825 * @hostrcb: hostrcb struct
1826 *
1827 * Return value:
1828 * none
1829 **/
1830static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1831 struct ipr_hostrcb *hostrcb)
1832{
1833 int i;
1834 struct ipr_hostrcb_type_04_error *error;
1835 struct ipr_hostrcb_array_data_entry *array_entry;
1836 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1837
1838 error = &hostrcb->hcam.u.error.u.type_04_error;
1839
1840 ipr_err_separator;
1841
1842 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1843 error->protection_level,
1844 ioa_cfg->host->host_no,
1845 error->last_func_vset_res_addr.bus,
1846 error->last_func_vset_res_addr.target,
1847 error->last_func_vset_res_addr.lun);
1848
1849 ipr_err_separator;
1850
1851 array_entry = error->array_member;
1852
1853 for (i = 0; i < 18; i++) {
cfc32139 1854 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1855 continue;
1856
fa15b1f6 1857 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1858 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1859 else
1da177e4 1860 ipr_err("Array Member %d:\n", i);
1da177e4 1861
cfc32139 1862 ipr_log_vpd(&array_entry->vpd);
1da177e4 1863
fa15b1f6
BK
1864 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1865 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1866 "Expected Location");
1da177e4
LT
1867
1868 ipr_err_separator;
1869
1870 if (i == 9)
1871 array_entry = error->array_member2;
1872 else
1873 array_entry++;
1874 }
1875}
1876
1877/**
b0df54bb 1878 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1879 * @ioa_cfg: ioa config struct
b0df54bb
BK
1880 * @data: IOA error data
1881 * @len: data length
1da177e4
LT
1882 *
1883 * Return value:
1884 * none
1885 **/
ac719aba 1886static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1da177e4
LT
1887{
1888 int i;
1da177e4 1889
b0df54bb 1890 if (len == 0)
1da177e4
LT
1891 return;
1892
ac719aba
BK
1893 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1894 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1895
b0df54bb 1896 for (i = 0; i < len / 4; i += 4) {
1da177e4 1897 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb
BK
1898 be32_to_cpu(data[i]),
1899 be32_to_cpu(data[i+1]),
1900 be32_to_cpu(data[i+2]),
1901 be32_to_cpu(data[i+3]));
1da177e4
LT
1902 }
1903}
1904
ee0f05b8
BK
1905/**
1906 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1907 * @ioa_cfg: ioa config struct
1908 * @hostrcb: hostrcb struct
1909 *
1910 * Return value:
1911 * none
1912 **/
1913static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1914 struct ipr_hostrcb *hostrcb)
1915{
1916 struct ipr_hostrcb_type_17_error *error;
1917
4565e370
WB
1918 if (ioa_cfg->sis64)
1919 error = &hostrcb->hcam.u.error64.u.type_17_error;
1920 else
1921 error = &hostrcb->hcam.u.error.u.type_17_error;
1922
ee0f05b8 1923 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1924 strim(error->failure_reason);
ee0f05b8 1925
8cf093e2
BK
1926 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1927 be32_to_cpu(hostrcb->hcam.u.error.prc));
1928 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1929 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8
BK
1930 be32_to_cpu(hostrcb->hcam.length) -
1931 (offsetof(struct ipr_hostrcb_error, u) +
1932 offsetof(struct ipr_hostrcb_type_17_error, data)));
1933}
1934
b0df54bb
BK
1935/**
1936 * ipr_log_dual_ioa_error - Log a dual adapter error.
1937 * @ioa_cfg: ioa config struct
1938 * @hostrcb: hostrcb struct
1939 *
1940 * Return value:
1941 * none
1942 **/
1943static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1944 struct ipr_hostrcb *hostrcb)
1945{
1946 struct ipr_hostrcb_type_07_error *error;
1947
1948 error = &hostrcb->hcam.u.error.u.type_07_error;
1949 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1950 strim(error->failure_reason);
b0df54bb 1951
8cf093e2
BK
1952 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1953 be32_to_cpu(hostrcb->hcam.u.error.prc));
1954 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1955 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb
BK
1956 be32_to_cpu(hostrcb->hcam.length) -
1957 (offsetof(struct ipr_hostrcb_error, u) +
1958 offsetof(struct ipr_hostrcb_type_07_error, data)));
1959}
1960
49dc6a18
BK
1961static const struct {
1962 u8 active;
1963 char *desc;
1964} path_active_desc[] = {
1965 { IPR_PATH_NO_INFO, "Path" },
1966 { IPR_PATH_ACTIVE, "Active path" },
1967 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1968};
1969
1970static const struct {
1971 u8 state;
1972 char *desc;
1973} path_state_desc[] = {
1974 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1975 { IPR_PATH_HEALTHY, "is healthy" },
1976 { IPR_PATH_DEGRADED, "is degraded" },
1977 { IPR_PATH_FAILED, "is failed" }
1978};
1979
1980/**
1981 * ipr_log_fabric_path - Log a fabric path error
1982 * @hostrcb: hostrcb struct
1983 * @fabric: fabric descriptor
1984 *
1985 * Return value:
1986 * none
1987 **/
1988static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1989 struct ipr_hostrcb_fabric_desc *fabric)
1990{
1991 int i, j;
1992 u8 path_state = fabric->path_state;
1993 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1994 u8 state = path_state & IPR_PATH_STATE_MASK;
1995
1996 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1997 if (path_active_desc[i].active != active)
1998 continue;
1999
2000 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2001 if (path_state_desc[j].state != state)
2002 continue;
2003
2004 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2005 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2006 path_active_desc[i].desc, path_state_desc[j].desc,
2007 fabric->ioa_port);
2008 } else if (fabric->cascaded_expander == 0xff) {
2009 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2010 path_active_desc[i].desc, path_state_desc[j].desc,
2011 fabric->ioa_port, fabric->phy);
2012 } else if (fabric->phy == 0xff) {
2013 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2014 path_active_desc[i].desc, path_state_desc[j].desc,
2015 fabric->ioa_port, fabric->cascaded_expander);
2016 } else {
2017 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2018 path_active_desc[i].desc, path_state_desc[j].desc,
2019 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2020 }
2021 return;
2022 }
2023 }
2024
2025 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2026 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2027}
2028
4565e370
WB
2029/**
2030 * ipr_log64_fabric_path - Log a fabric path error
2031 * @hostrcb: hostrcb struct
2032 * @fabric: fabric descriptor
2033 *
2034 * Return value:
2035 * none
2036 **/
2037static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2038 struct ipr_hostrcb64_fabric_desc *fabric)
2039{
2040 int i, j;
2041 u8 path_state = fabric->path_state;
2042 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2043 u8 state = path_state & IPR_PATH_STATE_MASK;
2044 char buffer[IPR_MAX_RES_PATH_LENGTH];
2045
2046 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2047 if (path_active_desc[i].active != active)
2048 continue;
2049
2050 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2051 if (path_state_desc[j].state != state)
2052 continue;
2053
2054 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2055 path_active_desc[i].desc, path_state_desc[j].desc,
b3b3b407
BK
2056 ipr_format_res_path(hostrcb->ioa_cfg,
2057 fabric->res_path,
2058 buffer, sizeof(buffer)));
4565e370
WB
2059 return;
2060 }
2061 }
2062
2063 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
b3b3b407
BK
2064 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2065 buffer, sizeof(buffer)));
4565e370
WB
2066}
2067
49dc6a18
BK
2068static const struct {
2069 u8 type;
2070 char *desc;
2071} path_type_desc[] = {
2072 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2073 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2074 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2075 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2076};
2077
2078static const struct {
2079 u8 status;
2080 char *desc;
2081} path_status_desc[] = {
2082 { IPR_PATH_CFG_NO_PROB, "Functional" },
2083 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2084 { IPR_PATH_CFG_FAILED, "Failed" },
2085 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2086 { IPR_PATH_NOT_DETECTED, "Missing" },
2087 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2088};
2089
2090static const char *link_rate[] = {
2091 "unknown",
2092 "disabled",
2093 "phy reset problem",
2094 "spinup hold",
2095 "port selector",
2096 "unknown",
2097 "unknown",
2098 "unknown",
2099 "1.5Gbps",
2100 "3.0Gbps",
2101 "unknown",
2102 "unknown",
2103 "unknown",
2104 "unknown",
2105 "unknown",
2106 "unknown"
2107};
2108
2109/**
2110 * ipr_log_path_elem - Log a fabric path element.
2111 * @hostrcb: hostrcb struct
2112 * @cfg: fabric path element struct
2113 *
2114 * Return value:
2115 * none
2116 **/
2117static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2118 struct ipr_hostrcb_config_element *cfg)
2119{
2120 int i, j;
2121 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2122 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2123
2124 if (type == IPR_PATH_CFG_NOT_EXIST)
2125 return;
2126
2127 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2128 if (path_type_desc[i].type != type)
2129 continue;
2130
2131 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2132 if (path_status_desc[j].status != status)
2133 continue;
2134
2135 if (type == IPR_PATH_CFG_IOA_PORT) {
2136 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2137 path_status_desc[j].desc, path_type_desc[i].desc,
2138 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2139 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2140 } else {
2141 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2142 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2143 path_status_desc[j].desc, path_type_desc[i].desc,
2144 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2145 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2146 } else if (cfg->cascaded_expander == 0xff) {
2147 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2148 "WWN=%08X%08X\n", path_status_desc[j].desc,
2149 path_type_desc[i].desc, cfg->phy,
2150 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2151 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2152 } else if (cfg->phy == 0xff) {
2153 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2154 "WWN=%08X%08X\n", path_status_desc[j].desc,
2155 path_type_desc[i].desc, cfg->cascaded_expander,
2156 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2157 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2158 } else {
2159 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2160 "WWN=%08X%08X\n", path_status_desc[j].desc,
2161 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2162 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2163 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2164 }
2165 }
2166 return;
2167 }
2168 }
2169
2170 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2171 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2172 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2173 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2174}
2175
4565e370
WB
2176/**
2177 * ipr_log64_path_elem - Log a fabric path element.
2178 * @hostrcb: hostrcb struct
2179 * @cfg: fabric path element struct
2180 *
2181 * Return value:
2182 * none
2183 **/
2184static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2185 struct ipr_hostrcb64_config_element *cfg)
2186{
2187 int i, j;
2188 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2189 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2190 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2191 char buffer[IPR_MAX_RES_PATH_LENGTH];
2192
2193 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2194 return;
2195
2196 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2197 if (path_type_desc[i].type != type)
2198 continue;
2199
2200 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2201 if (path_status_desc[j].status != status)
2202 continue;
2203
2204 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
b3b3b407
BK
2206 ipr_format_res_path(hostrcb->ioa_cfg,
2207 cfg->res_path, buffer, sizeof(buffer)),
2208 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2209 be32_to_cpu(cfg->wwid[0]),
2210 be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2211 return;
2212 }
2213 }
2214 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2215 "WWN=%08X%08X\n", cfg->type_status,
b3b3b407
BK
2216 ipr_format_res_path(hostrcb->ioa_cfg,
2217 cfg->res_path, buffer, sizeof(buffer)),
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2220}
2221
49dc6a18
BK
2222/**
2223 * ipr_log_fabric_error - Log a fabric error.
2224 * @ioa_cfg: ioa config struct
2225 * @hostrcb: hostrcb struct
2226 *
2227 * Return value:
2228 * none
2229 **/
2230static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2231 struct ipr_hostrcb *hostrcb)
2232{
2233 struct ipr_hostrcb_type_20_error *error;
2234 struct ipr_hostrcb_fabric_desc *fabric;
2235 struct ipr_hostrcb_config_element *cfg;
2236 int i, add_len;
2237
2238 error = &hostrcb->hcam.u.error.u.type_20_error;
2239 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2240 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2241
2242 add_len = be32_to_cpu(hostrcb->hcam.length) -
2243 (offsetof(struct ipr_hostrcb_error, u) +
2244 offsetof(struct ipr_hostrcb_type_20_error, desc));
2245
2246 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2247 ipr_log_fabric_path(hostrcb, fabric);
2248 for_each_fabric_cfg(fabric, cfg)
2249 ipr_log_path_elem(hostrcb, cfg);
2250
2251 add_len -= be16_to_cpu(fabric->length);
2252 fabric = (struct ipr_hostrcb_fabric_desc *)
2253 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2254 }
2255
ac719aba 2256 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
2257}
2258
4565e370
WB
2259/**
2260 * ipr_log_sis64_array_error - Log a sis64 array error.
2261 * @ioa_cfg: ioa config struct
2262 * @hostrcb: hostrcb struct
2263 *
2264 * Return value:
2265 * none
2266 **/
2267static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2268 struct ipr_hostrcb *hostrcb)
2269{
2270 int i, num_entries;
2271 struct ipr_hostrcb_type_24_error *error;
2272 struct ipr_hostrcb64_array_data_entry *array_entry;
2273 char buffer[IPR_MAX_RES_PATH_LENGTH];
2274 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2275
2276 error = &hostrcb->hcam.u.error64.u.type_24_error;
2277
2278 ipr_err_separator;
2279
2280 ipr_err("RAID %s Array Configuration: %s\n",
2281 error->protection_level,
b3b3b407
BK
2282 ipr_format_res_path(ioa_cfg, error->last_res_path,
2283 buffer, sizeof(buffer)));
4565e370
WB
2284
2285 ipr_err_separator;
2286
2287 array_entry = error->array_member;
7262026f
WB
2288 num_entries = min_t(u32, error->num_entries,
2289 ARRAY_SIZE(error->array_member));
4565e370
WB
2290
2291 for (i = 0; i < num_entries; i++, array_entry++) {
2292
2293 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2294 continue;
2295
2296 if (error->exposed_mode_adn == i)
2297 ipr_err("Exposed Array Member %d:\n", i);
2298 else
2299 ipr_err("Array Member %d:\n", i);
2300
2301 ipr_err("Array Member %d:\n", i);
2302 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2303 ipr_err("Current Location: %s\n",
b3b3b407
BK
2304 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2305 buffer, sizeof(buffer)));
7262026f 2306 ipr_err("Expected Location: %s\n",
b3b3b407
BK
2307 ipr_format_res_path(ioa_cfg,
2308 array_entry->expected_res_path,
2309 buffer, sizeof(buffer)));
4565e370
WB
2310
2311 ipr_err_separator;
2312 }
2313}
2314
2315/**
2316 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2317 * @ioa_cfg: ioa config struct
2318 * @hostrcb: hostrcb struct
2319 *
2320 * Return value:
2321 * none
2322 **/
2323static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2324 struct ipr_hostrcb *hostrcb)
2325{
2326 struct ipr_hostrcb_type_30_error *error;
2327 struct ipr_hostrcb64_fabric_desc *fabric;
2328 struct ipr_hostrcb64_config_element *cfg;
2329 int i, add_len;
2330
2331 error = &hostrcb->hcam.u.error64.u.type_30_error;
2332
2333 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2334 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2335
2336 add_len = be32_to_cpu(hostrcb->hcam.length) -
2337 (offsetof(struct ipr_hostrcb64_error, u) +
2338 offsetof(struct ipr_hostrcb_type_30_error, desc));
2339
2340 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2341 ipr_log64_fabric_path(hostrcb, fabric);
2342 for_each_fabric_cfg(fabric, cfg)
2343 ipr_log64_path_elem(hostrcb, cfg);
2344
2345 add_len -= be16_to_cpu(fabric->length);
2346 fabric = (struct ipr_hostrcb64_fabric_desc *)
2347 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2348 }
2349
2350 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2351}
2352
b0df54bb
BK
2353/**
2354 * ipr_log_generic_error - Log an adapter error.
2355 * @ioa_cfg: ioa config struct
2356 * @hostrcb: hostrcb struct
2357 *
2358 * Return value:
2359 * none
2360 **/
2361static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2362 struct ipr_hostrcb *hostrcb)
2363{
ac719aba 2364 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb
BK
2365 be32_to_cpu(hostrcb->hcam.length));
2366}
2367
169b9ec8
WX
2368/**
2369 * ipr_log_sis64_device_error - Log a cache error.
2370 * @ioa_cfg: ioa config struct
2371 * @hostrcb: hostrcb struct
2372 *
2373 * Return value:
2374 * none
2375 **/
2376static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2377 struct ipr_hostrcb *hostrcb)
2378{
2379 struct ipr_hostrcb_type_21_error *error;
2380 char buffer[IPR_MAX_RES_PATH_LENGTH];
2381
2382 error = &hostrcb->hcam.u.error64.u.type_21_error;
2383
2384 ipr_err("-----Failing Device Information-----\n");
2385 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2386 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2387 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2388 ipr_err("Device Resource Path: %s\n",
2389 __ipr_format_res_path(error->res_path,
2390 buffer, sizeof(buffer)));
2391 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2392 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2393 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2394 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2395 ipr_err("SCSI Sense Data:\n");
2396 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2397 ipr_err("SCSI Command Descriptor Block: \n");
2398 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2399
2400 ipr_err("Additional IOA Data:\n");
2401 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2402}
2403
1da177e4
LT
2404/**
2405 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2406 * @ioasc: IOASC
2407 *
2408 * This function will return the index of into the ipr_error_table
2409 * for the specified IOASC. If the IOASC is not in the table,
2410 * 0 will be returned, which points to the entry used for unknown errors.
2411 *
2412 * Return value:
2413 * index into the ipr_error_table
2414 **/
2415static u32 ipr_get_error(u32 ioasc)
2416{
2417 int i;
2418
2419 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2420 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2421 return i;
2422
2423 return 0;
2424}
2425
2426/**
2427 * ipr_handle_log_data - Log an adapter error.
2428 * @ioa_cfg: ioa config struct
2429 * @hostrcb: hostrcb struct
2430 *
2431 * This function logs an adapter error to the system.
2432 *
2433 * Return value:
2434 * none
2435 **/
2436static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2437 struct ipr_hostrcb *hostrcb)
2438{
2439 u32 ioasc;
2440 int error_index;
3185ea63 2441 struct ipr_hostrcb_type_21_error *error;
1da177e4
LT
2442
2443 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2444 return;
2445
2446 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2447 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2448
4565e370
WB
2449 if (ioa_cfg->sis64)
2450 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2451 else
2452 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2453
4565e370
WB
2454 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2455 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2456 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2457 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2458 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2459 }
2460
2461 error_index = ipr_get_error(ioasc);
2462
2463 if (!ipr_error_table[error_index].log_hcam)
2464 return;
2465
3185ea63 2466 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2467 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2468 error = &hostrcb->hcam.u.error64.u.type_21_error;
2469
2470 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2471 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2472 return;
2473 }
2474
49dc6a18 2475 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2476
2477 /* Set indication we have logged an error */
2478 ioa_cfg->errors_logged++;
2479
933916f3 2480 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2481 return;
cf852037
BK
2482 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2483 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2484
2485 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2486 case IPR_HOST_RCB_OVERLAY_ID_2:
2487 ipr_log_cache_error(ioa_cfg, hostrcb);
2488 break;
2489 case IPR_HOST_RCB_OVERLAY_ID_3:
2490 ipr_log_config_error(ioa_cfg, hostrcb);
2491 break;
2492 case IPR_HOST_RCB_OVERLAY_ID_4:
2493 case IPR_HOST_RCB_OVERLAY_ID_6:
2494 ipr_log_array_error(ioa_cfg, hostrcb);
2495 break;
b0df54bb
BK
2496 case IPR_HOST_RCB_OVERLAY_ID_7:
2497 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2498 break;
ee0f05b8
BK
2499 case IPR_HOST_RCB_OVERLAY_ID_12:
2500 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2501 break;
2502 case IPR_HOST_RCB_OVERLAY_ID_13:
2503 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2504 break;
2505 case IPR_HOST_RCB_OVERLAY_ID_14:
2506 case IPR_HOST_RCB_OVERLAY_ID_16:
2507 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2508 break;
2509 case IPR_HOST_RCB_OVERLAY_ID_17:
2510 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2511 break;
49dc6a18
BK
2512 case IPR_HOST_RCB_OVERLAY_ID_20:
2513 ipr_log_fabric_error(ioa_cfg, hostrcb);
2514 break;
169b9ec8
WX
2515 case IPR_HOST_RCB_OVERLAY_ID_21:
2516 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2517 break;
4565e370
WB
2518 case IPR_HOST_RCB_OVERLAY_ID_23:
2519 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2520 break;
2521 case IPR_HOST_RCB_OVERLAY_ID_24:
2522 case IPR_HOST_RCB_OVERLAY_ID_26:
2523 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2524 break;
2525 case IPR_HOST_RCB_OVERLAY_ID_30:
2526 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2527 break;
cf852037 2528 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2529 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2530 default:
a9cfca96 2531 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2532 break;
2533 }
2534}
2535
2536/**
2537 * ipr_process_error - Op done function for an adapter error log.
2538 * @ipr_cmd: ipr command struct
2539 *
2540 * This function is the op done function for an error log host
2541 * controlled async from the adapter. It will log the error and
2542 * send the HCAM back to the adapter.
2543 *
2544 * Return value:
2545 * none
2546 **/
2547static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2548{
2549 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2550 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2551 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4565e370
WB
2552 u32 fd_ioasc;
2553
2554 if (ioa_cfg->sis64)
2555 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2556 else
2557 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4
LT
2558
2559 list_del(&hostrcb->queue);
05a6538a 2560 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
2561
2562 if (!ioasc) {
2563 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2564 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1da177e4
LT
2566 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2567 dev_err(&ioa_cfg->pdev->dev,
2568 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2569 }
2570
2571 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2572}
2573
2574/**
2575 * ipr_timeout - An internally generated op has timed out.
2576 * @ipr_cmd: ipr command struct
2577 *
2578 * This function blocks host requests and initiates an
2579 * adapter reset.
2580 *
2581 * Return value:
2582 * none
2583 **/
2584static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2585{
2586 unsigned long lock_flags = 0;
2587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2588
2589 ENTER;
2590 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2591
2592 ioa_cfg->errors_logged++;
2593 dev_err(&ioa_cfg->pdev->dev,
2594 "Adapter being reset due to command timeout.\n");
2595
2596 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2597 ioa_cfg->sdt_state = GET_DUMP;
2598
2599 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2600 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2601
2602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2603 LEAVE;
2604}
2605
2606/**
2607 * ipr_oper_timeout - Adapter timed out transitioning to operational
2608 * @ipr_cmd: ipr command struct
2609 *
2610 * This function blocks host requests and initiates an
2611 * adapter reset.
2612 *
2613 * Return value:
2614 * none
2615 **/
2616static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2617{
2618 unsigned long lock_flags = 0;
2619 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2620
2621 ENTER;
2622 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2623
2624 ioa_cfg->errors_logged++;
2625 dev_err(&ioa_cfg->pdev->dev,
2626 "Adapter timed out transitioning to operational.\n");
2627
2628 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2629 ioa_cfg->sdt_state = GET_DUMP;
2630
2631 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2632 if (ipr_fastfail)
2633 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2634 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2635 }
2636
2637 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2638 LEAVE;
2639}
2640
1da177e4
LT
2641/**
2642 * ipr_find_ses_entry - Find matching SES in SES table
2643 * @res: resource entry struct of SES
2644 *
2645 * Return value:
2646 * pointer to SES table entry / NULL on failure
2647 **/
2648static const struct ipr_ses_table_entry *
2649ipr_find_ses_entry(struct ipr_resource_entry *res)
2650{
2651 int i, j, matches;
3e7ebdfa 2652 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2653 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2654
2655 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2656 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2657 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2658 vpids = &res->std_inq_data.vpids;
2659 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2660 matches++;
2661 else
2662 break;
2663 } else
2664 matches++;
2665 }
2666
2667 if (matches == IPR_PROD_ID_LEN)
2668 return ste;
2669 }
2670
2671 return NULL;
2672}
2673
2674/**
2675 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2676 * @ioa_cfg: ioa config struct
2677 * @bus: SCSI bus
2678 * @bus_width: bus width
2679 *
2680 * Return value:
2681 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2682 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2683 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2684 * max 160MHz = max 320MB/sec).
2685 **/
2686static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2687{
2688 struct ipr_resource_entry *res;
2689 const struct ipr_ses_table_entry *ste;
2690 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2691
2692 /* Loop through each config table entry in the config table buffer */
2693 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2694 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2695 continue;
2696
3e7ebdfa 2697 if (bus != res->bus)
1da177e4
LT
2698 continue;
2699
2700 if (!(ste = ipr_find_ses_entry(res)))
2701 continue;
2702
2703 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2704 }
2705
2706 return max_xfer_rate;
2707}
2708
2709/**
2710 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2711 * @ioa_cfg: ioa config struct
2712 * @max_delay: max delay in micro-seconds to wait
2713 *
2714 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2715 *
2716 * Return value:
2717 * 0 on success / other on failure
2718 **/
2719static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2720{
2721 volatile u32 pcii_reg;
2722 int delay = 1;
2723
2724 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2725 while (delay < max_delay) {
2726 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2727
2728 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2729 return 0;
2730
2731 /* udelay cannot be used if delay is more than a few milliseconds */
2732 if ((delay / 1000) > MAX_UDELAY_MS)
2733 mdelay(delay / 1000);
2734 else
2735 udelay(delay);
2736
2737 delay += delay;
2738 }
2739 return -EIO;
2740}
2741
dcbad00e
WB
2742/**
2743 * ipr_get_sis64_dump_data_section - Dump IOA memory
2744 * @ioa_cfg: ioa config struct
2745 * @start_addr: adapter address to dump
2746 * @dest: destination kernel buffer
2747 * @length_in_words: length to dump in 4 byte words
2748 *
2749 * Return value:
2750 * 0 on success
2751 **/
2752static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2753 u32 start_addr,
2754 __be32 *dest, u32 length_in_words)
2755{
2756 int i;
2757
2758 for (i = 0; i < length_in_words; i++) {
2759 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2760 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2761 dest++;
2762 }
2763
2764 return 0;
2765}
2766
1da177e4
LT
2767/**
2768 * ipr_get_ldump_data_section - Dump IOA memory
2769 * @ioa_cfg: ioa config struct
2770 * @start_addr: adapter address to dump
2771 * @dest: destination kernel buffer
2772 * @length_in_words: length to dump in 4 byte words
2773 *
2774 * Return value:
2775 * 0 on success / -EIO on failure
2776 **/
2777static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2778 u32 start_addr,
2779 __be32 *dest, u32 length_in_words)
2780{
2781 volatile u32 temp_pcii_reg;
2782 int i, delay = 0;
2783
dcbad00e
WB
2784 if (ioa_cfg->sis64)
2785 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2786 dest, length_in_words);
2787
1da177e4
LT
2788 /* Write IOA interrupt reg starting LDUMP state */
2789 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2790 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2791
2792 /* Wait for IO debug acknowledge */
2793 if (ipr_wait_iodbg_ack(ioa_cfg,
2794 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2795 dev_err(&ioa_cfg->pdev->dev,
2796 "IOA dump long data transfer timeout\n");
2797 return -EIO;
2798 }
2799
2800 /* Signal LDUMP interlocked - clear IO debug ack */
2801 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2802 ioa_cfg->regs.clr_interrupt_reg);
2803
2804 /* Write Mailbox with starting address */
2805 writel(start_addr, ioa_cfg->ioa_mailbox);
2806
2807 /* Signal address valid - clear IOA Reset alert */
2808 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2809 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2810
2811 for (i = 0; i < length_in_words; i++) {
2812 /* Wait for IO debug acknowledge */
2813 if (ipr_wait_iodbg_ack(ioa_cfg,
2814 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2815 dev_err(&ioa_cfg->pdev->dev,
2816 "IOA dump short data transfer timeout\n");
2817 return -EIO;
2818 }
2819
2820 /* Read data from mailbox and increment destination pointer */
2821 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2822 dest++;
2823
2824 /* For all but the last word of data, signal data received */
2825 if (i < (length_in_words - 1)) {
2826 /* Signal dump data received - Clear IO debug Ack */
2827 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2828 ioa_cfg->regs.clr_interrupt_reg);
2829 }
2830 }
2831
2832 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2833 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2834 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2835
2836 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2837 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2838
2839 /* Signal dump data received - Clear IO debug Ack */
2840 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2841 ioa_cfg->regs.clr_interrupt_reg);
2842
2843 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2844 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2845 temp_pcii_reg =
214777ba 2846 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2847
2848 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2849 return 0;
2850
2851 udelay(10);
2852 delay += 10;
2853 }
2854
2855 return 0;
2856}
2857
2858#ifdef CONFIG_SCSI_IPR_DUMP
2859/**
2860 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2861 * @ioa_cfg: ioa config struct
2862 * @pci_address: adapter address
2863 * @length: length of data to copy
2864 *
2865 * Copy data from PCI adapter to kernel buffer.
2866 * Note: length MUST be a 4 byte multiple
2867 * Return value:
2868 * 0 on success / other on failure
2869 **/
2870static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2871 unsigned long pci_address, u32 length)
2872{
2873 int bytes_copied = 0;
4d4dd706 2874 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
1da177e4
LT
2875 __be32 *page;
2876 unsigned long lock_flags = 0;
2877 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2878
4d4dd706
KSS
2879 if (ioa_cfg->sis64)
2880 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2881 else
2882 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2883
1da177e4 2884 while (bytes_copied < length &&
4d4dd706 2885 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
1da177e4
LT
2886 if (ioa_dump->page_offset >= PAGE_SIZE ||
2887 ioa_dump->page_offset == 0) {
2888 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2889
2890 if (!page) {
2891 ipr_trace;
2892 return bytes_copied;
2893 }
2894
2895 ioa_dump->page_offset = 0;
2896 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2897 ioa_dump->next_page_index++;
2898 } else
2899 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2900
2901 rem_len = length - bytes_copied;
2902 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2903 cur_len = min(rem_len, rem_page_len);
2904
2905 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2906 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2907 rc = -EIO;
2908 } else {
2909 rc = ipr_get_ldump_data_section(ioa_cfg,
2910 pci_address + bytes_copied,
2911 &page[ioa_dump->page_offset / 4],
2912 (cur_len / sizeof(u32)));
2913 }
2914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2915
2916 if (!rc) {
2917 ioa_dump->page_offset += cur_len;
2918 bytes_copied += cur_len;
2919 } else {
2920 ipr_trace;
2921 break;
2922 }
2923 schedule();
2924 }
2925
2926 return bytes_copied;
2927}
2928
2929/**
2930 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2931 * @hdr: dump entry header struct
2932 *
2933 * Return value:
2934 * nothing
2935 **/
2936static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2937{
2938 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2939 hdr->num_elems = 1;
2940 hdr->offset = sizeof(*hdr);
2941 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2942}
2943
2944/**
2945 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2946 * @ioa_cfg: ioa config struct
2947 * @driver_dump: driver dump struct
2948 *
2949 * Return value:
2950 * nothing
2951 **/
2952static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2953 struct ipr_driver_dump *driver_dump)
2954{
2955 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2956
2957 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2958 driver_dump->ioa_type_entry.hdr.len =
2959 sizeof(struct ipr_dump_ioa_type_entry) -
2960 sizeof(struct ipr_dump_entry_header);
2961 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2962 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2963 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2964 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2965 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2966 ucode_vpd->minor_release[1];
2967 driver_dump->hdr.num_entries++;
2968}
2969
2970/**
2971 * ipr_dump_version_data - Fill in the driver version in the dump.
2972 * @ioa_cfg: ioa config struct
2973 * @driver_dump: driver dump struct
2974 *
2975 * Return value:
2976 * nothing
2977 **/
2978static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2979 struct ipr_driver_dump *driver_dump)
2980{
2981 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2982 driver_dump->version_entry.hdr.len =
2983 sizeof(struct ipr_dump_version_entry) -
2984 sizeof(struct ipr_dump_entry_header);
2985 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2986 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2987 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2988 driver_dump->hdr.num_entries++;
2989}
2990
2991/**
2992 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2993 * @ioa_cfg: ioa config struct
2994 * @driver_dump: driver dump struct
2995 *
2996 * Return value:
2997 * nothing
2998 **/
2999static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3000 struct ipr_driver_dump *driver_dump)
3001{
3002 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3003 driver_dump->trace_entry.hdr.len =
3004 sizeof(struct ipr_dump_trace_entry) -
3005 sizeof(struct ipr_dump_entry_header);
3006 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3007 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3008 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3009 driver_dump->hdr.num_entries++;
3010}
3011
3012/**
3013 * ipr_dump_location_data - Fill in the IOA location in the dump.
3014 * @ioa_cfg: ioa config struct
3015 * @driver_dump: driver dump struct
3016 *
3017 * Return value:
3018 * nothing
3019 **/
3020static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3021 struct ipr_driver_dump *driver_dump)
3022{
3023 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3024 driver_dump->location_entry.hdr.len =
3025 sizeof(struct ipr_dump_location_entry) -
3026 sizeof(struct ipr_dump_entry_header);
3027 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3028 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 3029 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
3030 driver_dump->hdr.num_entries++;
3031}
3032
3033/**
3034 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3035 * @ioa_cfg: ioa config struct
3036 * @dump: dump struct
3037 *
3038 * Return value:
3039 * nothing
3040 **/
3041static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3042{
3043 unsigned long start_addr, sdt_word;
3044 unsigned long lock_flags = 0;
3045 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3046 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
4d4dd706
KSS
3047 u32 num_entries, max_num_entries, start_off, end_off;
3048 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 3049 struct ipr_sdt *sdt;
dcbad00e 3050 int valid = 1;
1da177e4
LT
3051 int i;
3052
3053 ENTER;
3054
3055 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3056
41e9a696 3057 if (ioa_cfg->sdt_state != READ_DUMP) {
1da177e4
LT
3058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3059 return;
3060 }
3061
110def85
WB
3062 if (ioa_cfg->sis64) {
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3064 ssleep(IPR_DUMP_DELAY_SECONDS);
3065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3066 }
3067
1da177e4
LT
3068 start_addr = readl(ioa_cfg->ioa_mailbox);
3069
dcbad00e 3070 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
3071 dev_err(&ioa_cfg->pdev->dev,
3072 "Invalid dump table format: %lx\n", start_addr);
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 return;
3075 }
3076
3077 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3078
3079 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3080
3081 /* Initialize the overall dump header */
3082 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3083 driver_dump->hdr.num_entries = 1;
3084 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3085 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3086 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3087 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3088
3089 ipr_dump_version_data(ioa_cfg, driver_dump);
3090 ipr_dump_location_data(ioa_cfg, driver_dump);
3091 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3092 ipr_dump_trace_data(ioa_cfg, driver_dump);
3093
3094 /* Update dump_header */
3095 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3096
3097 /* IOA Dump entry */
3098 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
3099 ioa_dump->hdr.len = 0;
3100 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3101 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3102
3103 /* First entries in sdt are actually a list of dump addresses and
3104 lengths to gather the real dump data. sdt represents the pointer
3105 to the ioa generated dump table. Dump data will be extracted based
3106 on entries in this table */
3107 sdt = &ioa_dump->sdt;
3108
4d4dd706
KSS
3109 if (ioa_cfg->sis64) {
3110 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3111 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3112 } else {
3113 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3114 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3115 }
3116
3117 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3118 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 3119 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 3120 bytes_to_copy / sizeof(__be32));
1da177e4
LT
3121
3122 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
3123 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3124 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
3125 dev_err(&ioa_cfg->pdev->dev,
3126 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3127 rc, be32_to_cpu(sdt->hdr.state));
3128 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3129 ioa_cfg->sdt_state = DUMP_OBTAINED;
3130 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3131 return;
3132 }
3133
3134 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3135
4d4dd706
KSS
3136 if (num_entries > max_num_entries)
3137 num_entries = max_num_entries;
3138
3139 /* Update dump length to the actual data to be copied */
3140 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3141 if (ioa_cfg->sis64)
3142 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3143 else
3144 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
1da177e4
LT
3145
3146 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147
3148 for (i = 0; i < num_entries; i++) {
4d4dd706 3149 if (ioa_dump->hdr.len > max_dump_size) {
1da177e4
LT
3150 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3151 break;
3152 }
3153
3154 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
3155 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3156 if (ioa_cfg->sis64)
3157 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3158 else {
3159 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3160 end_off = be32_to_cpu(sdt->entry[i].end_token);
3161
3162 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3163 bytes_to_copy = end_off - start_off;
3164 else
3165 valid = 0;
3166 }
3167 if (valid) {
4d4dd706 3168 if (bytes_to_copy > max_dump_size) {
1da177e4
LT
3169 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3170 continue;
3171 }
3172
3173 /* Copy data from adapter to driver buffers */
3174 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3175 bytes_to_copy);
3176
3177 ioa_dump->hdr.len += bytes_copied;
3178
3179 if (bytes_copied != bytes_to_copy) {
3180 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3181 break;
3182 }
3183 }
3184 }
3185 }
3186
3187 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3188
3189 /* Update dump_header */
3190 driver_dump->hdr.len += ioa_dump->hdr.len;
3191 wmb();
3192 ioa_cfg->sdt_state = DUMP_OBTAINED;
3193 LEAVE;
3194}
3195
3196#else
203fa3fe 3197#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
1da177e4
LT
3198#endif
3199
3200/**
3201 * ipr_release_dump - Free adapter dump memory
3202 * @kref: kref struct
3203 *
3204 * Return value:
3205 * nothing
3206 **/
3207static void ipr_release_dump(struct kref *kref)
3208{
203fa3fe 3209 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
1da177e4
LT
3210 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3211 unsigned long lock_flags = 0;
3212 int i;
3213
3214 ENTER;
3215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3216 ioa_cfg->dump = NULL;
3217 ioa_cfg->sdt_state = INACTIVE;
3218 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3219
3220 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3221 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3222
4d4dd706 3223 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3224 kfree(dump);
3225 LEAVE;
3226}
3227
3228/**
3229 * ipr_worker_thread - Worker thread
c4028958 3230 * @work: ioa config struct
1da177e4
LT
3231 *
3232 * Called at task level from a work thread. This function takes care
3233 * of adding and removing device from the mid-layer as configuration
3234 * changes are detected by the adapter.
3235 *
3236 * Return value:
3237 * nothing
3238 **/
c4028958 3239static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3240{
3241 unsigned long lock_flags;
3242 struct ipr_resource_entry *res;
3243 struct scsi_device *sdev;
3244 struct ipr_dump *dump;
c4028958
DH
3245 struct ipr_ioa_cfg *ioa_cfg =
3246 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3247 u8 bus, target, lun;
3248 int did_work;
3249
3250 ENTER;
3251 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3252
41e9a696 3253 if (ioa_cfg->sdt_state == READ_DUMP) {
1da177e4
LT
3254 dump = ioa_cfg->dump;
3255 if (!dump) {
3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257 return;
3258 }
3259 kref_get(&dump->kref);
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261 ipr_get_ioa_dump(ioa_cfg, dump);
3262 kref_put(&dump->kref, ipr_release_dump);
3263
3264 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3265 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
1da177e4
LT
3266 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3268 return;
3269 }
3270
3271restart:
3272 do {
3273 did_work = 0;
f688f96d 3274 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1da177e4
LT
3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276 return;
3277 }
3278
3279 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3280 if (res->del_from_ml && res->sdev) {
3281 did_work = 1;
3282 sdev = res->sdev;
3283 if (!scsi_device_get(sdev)) {
5767a1c4
KSS
3284 if (!res->add_to_ml)
3285 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3286 else
3287 res->del_from_ml = 0;
1da177e4
LT
3288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289 scsi_remove_device(sdev);
3290 scsi_device_put(sdev);
3291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3292 }
3293 break;
3294 }
3295 }
203fa3fe 3296 } while (did_work);
1da177e4
LT
3297
3298 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3299 if (res->add_to_ml) {
3e7ebdfa
WB
3300 bus = res->bus;
3301 target = res->target;
3302 lun = res->lun;
1121b794 3303 res->add_to_ml = 0;
1da177e4
LT
3304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305 scsi_add_device(ioa_cfg->host, bus, target, lun);
3306 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3307 goto restart;
3308 }
3309 }
3310
f688f96d 3311 ioa_cfg->scan_done = 1;
1da177e4 3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3313 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3314 LEAVE;
3315}
3316
3317#ifdef CONFIG_SCSI_IPR_TRACE
3318/**
3319 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3320 * @filp: open sysfs file
1da177e4 3321 * @kobj: kobject struct
91a69029 3322 * @bin_attr: bin_attribute struct
1da177e4
LT
3323 * @buf: buffer
3324 * @off: offset
3325 * @count: buffer size
3326 *
3327 * Return value:
3328 * number of bytes printed to buffer
3329 **/
2c3c8bea 3330static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
91a69029
ZR
3331 struct bin_attribute *bin_attr,
3332 char *buf, loff_t off, size_t count)
1da177e4 3333{
ee959b00
TJ
3334 struct device *dev = container_of(kobj, struct device, kobj);
3335 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3336 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3337 unsigned long lock_flags = 0;
d777aaf3 3338 ssize_t ret;
1da177e4
LT
3339
3340 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3341 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3342 IPR_TRACE_SIZE);
1da177e4 3343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3344
3345 return ret;
1da177e4
LT
3346}
3347
3348static struct bin_attribute ipr_trace_attr = {
3349 .attr = {
3350 .name = "trace",
3351 .mode = S_IRUGO,
3352 },
3353 .size = 0,
3354 .read = ipr_read_trace,
3355};
3356#endif
3357
3358/**
3359 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3360 * @dev: class device struct
3361 * @buf: buffer
1da177e4
LT
3362 *
3363 * Return value:
3364 * number of bytes printed to buffer
3365 **/
ee959b00
TJ
3366static ssize_t ipr_show_fw_version(struct device *dev,
3367 struct device_attribute *attr, char *buf)
1da177e4 3368{
ee959b00 3369 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3370 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3371 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3372 unsigned long lock_flags = 0;
3373 int len;
3374
3375 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3376 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3377 ucode_vpd->major_release, ucode_vpd->card_type,
3378 ucode_vpd->minor_release[0],
3379 ucode_vpd->minor_release[1]);
3380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3381 return len;
3382}
3383
ee959b00 3384static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3385 .attr = {
3386 .name = "fw_version",
3387 .mode = S_IRUGO,
3388 },
3389 .show = ipr_show_fw_version,
3390};
3391
3392/**
3393 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3394 * @dev: class device struct
3395 * @buf: buffer
1da177e4
LT
3396 *
3397 * Return value:
3398 * number of bytes printed to buffer
3399 **/
ee959b00
TJ
3400static ssize_t ipr_show_log_level(struct device *dev,
3401 struct device_attribute *attr, char *buf)
1da177e4 3402{
ee959b00 3403 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3404 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3405 unsigned long lock_flags = 0;
3406 int len;
3407
3408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3409 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411 return len;
3412}
3413
3414/**
3415 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3416 * @dev: class device struct
3417 * @buf: buffer
1da177e4
LT
3418 *
3419 * Return value:
3420 * number of bytes printed to buffer
3421 **/
ee959b00 3422static ssize_t ipr_store_log_level(struct device *dev,
203fa3fe 3423 struct device_attribute *attr,
1da177e4
LT
3424 const char *buf, size_t count)
3425{
ee959b00 3426 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3427 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3428 unsigned long lock_flags = 0;
3429
3430 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3431 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433 return strlen(buf);
3434}
3435
ee959b00 3436static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3437 .attr = {
3438 .name = "log_level",
3439 .mode = S_IRUGO | S_IWUSR,
3440 },
3441 .show = ipr_show_log_level,
3442 .store = ipr_store_log_level
3443};
3444
3445/**
3446 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3447 * @dev: device struct
3448 * @buf: buffer
3449 * @count: buffer size
1da177e4
LT
3450 *
3451 * This function will reset the adapter and wait a reasonable
3452 * amount of time for any errors that the adapter might log.
3453 *
3454 * Return value:
3455 * count on success / other on failure
3456 **/
ee959b00
TJ
3457static ssize_t ipr_store_diagnostics(struct device *dev,
3458 struct device_attribute *attr,
1da177e4
LT
3459 const char *buf, size_t count)
3460{
ee959b00 3461 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463 unsigned long lock_flags = 0;
3464 int rc = count;
3465
3466 if (!capable(CAP_SYS_ADMIN))
3467 return -EACCES;
3468
1da177e4 3469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3470 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3472 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3474 }
3475
1da177e4
LT
3476 ioa_cfg->errors_logged = 0;
3477 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3478
3479 if (ioa_cfg->in_reset_reload) {
3480 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3482
3483 /* Wait for a second for any errors to be logged */
3484 msleep(1000);
3485 } else {
3486 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3487 return -EIO;
3488 }
3489
3490 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3491 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3492 rc = -EIO;
3493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3494
3495 return rc;
3496}
3497
ee959b00 3498static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3499 .attr = {
3500 .name = "run_diagnostics",
3501 .mode = S_IWUSR,
3502 },
3503 .store = ipr_store_diagnostics
3504};
3505
f37eb54b
BK
3506/**
3507 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
3508 * @class_dev: device struct
3509 * @buf: buffer
f37eb54b
BK
3510 *
3511 * Return value:
3512 * number of bytes printed to buffer
3513 **/
ee959b00
TJ
3514static ssize_t ipr_show_adapter_state(struct device *dev,
3515 struct device_attribute *attr, char *buf)
f37eb54b 3516{
ee959b00 3517 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3518 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3519 unsigned long lock_flags = 0;
3520 int len;
3521
3522 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3523 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
f37eb54b
BK
3524 len = snprintf(buf, PAGE_SIZE, "offline\n");
3525 else
3526 len = snprintf(buf, PAGE_SIZE, "online\n");
3527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3528 return len;
3529}
3530
3531/**
3532 * ipr_store_adapter_state - Change adapter state
ee959b00
TJ
3533 * @dev: device struct
3534 * @buf: buffer
3535 * @count: buffer size
f37eb54b
BK
3536 *
3537 * This function will change the adapter's state.
3538 *
3539 * Return value:
3540 * count on success / other on failure
3541 **/
ee959b00
TJ
3542static ssize_t ipr_store_adapter_state(struct device *dev,
3543 struct device_attribute *attr,
f37eb54b
BK
3544 const char *buf, size_t count)
3545{
ee959b00 3546 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3547 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3548 unsigned long lock_flags;
56d6aa33 3549 int result = count, i;
f37eb54b
BK
3550
3551 if (!capable(CAP_SYS_ADMIN))
3552 return -EACCES;
3553
3554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3555 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3556 !strncmp(buf, "online", 6)) {
3557 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3558 spin_lock(&ioa_cfg->hrrq[i]._lock);
3559 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3560 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3561 }
3562 wmb();
f37eb54b
BK
3563 ioa_cfg->reset_retries = 0;
3564 ioa_cfg->in_ioa_bringdown = 0;
3565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3566 }
3567 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3568 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3569
3570 return result;
3571}
3572
ee959b00 3573static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3574 .attr = {
49dd0961 3575 .name = "online_state",
f37eb54b
BK
3576 .mode = S_IRUGO | S_IWUSR,
3577 },
3578 .show = ipr_show_adapter_state,
3579 .store = ipr_store_adapter_state
3580};
3581
1da177e4
LT
3582/**
3583 * ipr_store_reset_adapter - Reset the adapter
ee959b00
TJ
3584 * @dev: device struct
3585 * @buf: buffer
3586 * @count: buffer size
1da177e4
LT
3587 *
3588 * This function will reset the adapter.
3589 *
3590 * Return value:
3591 * count on success / other on failure
3592 **/
ee959b00
TJ
3593static ssize_t ipr_store_reset_adapter(struct device *dev,
3594 struct device_attribute *attr,
1da177e4
LT
3595 const char *buf, size_t count)
3596{
ee959b00 3597 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3598 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3599 unsigned long lock_flags;
3600 int result = count;
3601
3602 if (!capable(CAP_SYS_ADMIN))
3603 return -EACCES;
3604
3605 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3606 if (!ioa_cfg->in_reset_reload)
3607 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3608 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3609 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3610
3611 return result;
3612}
3613
ee959b00 3614static struct device_attribute ipr_ioa_reset_attr = {
1da177e4
LT
3615 .attr = {
3616 .name = "reset_host",
3617 .mode = S_IWUSR,
3618 },
3619 .store = ipr_store_reset_adapter
3620};
3621
b53d124a 3622static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3623 /**
3624 * ipr_show_iopoll_weight - Show ipr polling mode
3625 * @dev: class device struct
3626 * @buf: buffer
3627 *
3628 * Return value:
3629 * number of bytes printed to buffer
3630 **/
3631static ssize_t ipr_show_iopoll_weight(struct device *dev,
3632 struct device_attribute *attr, char *buf)
3633{
3634 struct Scsi_Host *shost = class_to_shost(dev);
3635 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3636 unsigned long lock_flags = 0;
3637 int len;
3638
3639 spin_lock_irqsave(shost->host_lock, lock_flags);
3640 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3641 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3642
3643 return len;
3644}
3645
3646/**
3647 * ipr_store_iopoll_weight - Change the adapter's polling mode
3648 * @dev: class device struct
3649 * @buf: buffer
3650 *
3651 * Return value:
3652 * number of bytes printed to buffer
3653 **/
3654static ssize_t ipr_store_iopoll_weight(struct device *dev,
3655 struct device_attribute *attr,
3656 const char *buf, size_t count)
3657{
3658 struct Scsi_Host *shost = class_to_shost(dev);
3659 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3660 unsigned long user_iopoll_weight;
3661 unsigned long lock_flags = 0;
3662 int i;
3663
3664 if (!ioa_cfg->sis64) {
3665 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3666 return -EINVAL;
3667 }
3668 if (kstrtoul(buf, 10, &user_iopoll_weight))
3669 return -EINVAL;
3670
3671 if (user_iopoll_weight > 256) {
3672 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3673 return -EINVAL;
3674 }
3675
3676 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3677 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
3678 return strlen(buf);
3679 }
3680
89f8b33c 3681 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3682 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3683 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3684 }
3685
3686 spin_lock_irqsave(shost->host_lock, lock_flags);
3687 ioa_cfg->iopoll_weight = user_iopoll_weight;
89f8b33c 3688 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3689 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3690 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3691 ioa_cfg->iopoll_weight, ipr_iopoll);
3692 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3693 }
3694 }
3695 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3696
3697 return strlen(buf);
3698}
3699
3700static struct device_attribute ipr_iopoll_weight_attr = {
3701 .attr = {
3702 .name = "iopoll_weight",
3703 .mode = S_IRUGO | S_IWUSR,
3704 },
3705 .show = ipr_show_iopoll_weight,
3706 .store = ipr_store_iopoll_weight
3707};
3708
1da177e4
LT
3709/**
3710 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3711 * @buf_len: buffer length
3712 *
3713 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3714 * list to use for microcode download
3715 *
3716 * Return value:
3717 * pointer to sglist / NULL on failure
3718 **/
3719static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3720{
3721 int sg_size, order, bsize_elem, num_elem, i, j;
3722 struct ipr_sglist *sglist;
3723 struct scatterlist *scatterlist;
3724 struct page *page;
3725
3726 /* Get the minimum size per scatter/gather element */
3727 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3728
3729 /* Get the actual size per element */
3730 order = get_order(sg_size);
3731
3732 /* Determine the actual number of bytes per element */
3733 bsize_elem = PAGE_SIZE * (1 << order);
3734
3735 /* Determine the actual number of sg entries needed */
3736 if (buf_len % bsize_elem)
3737 num_elem = (buf_len / bsize_elem) + 1;
3738 else
3739 num_elem = buf_len / bsize_elem;
3740
3741 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3742 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
3743 (sizeof(struct scatterlist) * (num_elem - 1)),
3744 GFP_KERNEL);
3745
3746 if (sglist == NULL) {
3747 ipr_trace;
3748 return NULL;
3749 }
3750
1da177e4 3751 scatterlist = sglist->scatterlist;
45711f1a 3752 sg_init_table(scatterlist, num_elem);
1da177e4
LT
3753
3754 sglist->order = order;
3755 sglist->num_sg = num_elem;
3756
3757 /* Allocate a bunch of sg elements */
3758 for (i = 0; i < num_elem; i++) {
3759 page = alloc_pages(GFP_KERNEL, order);
3760 if (!page) {
3761 ipr_trace;
3762
3763 /* Free up what we already allocated */
3764 for (j = i - 1; j >= 0; j--)
45711f1a 3765 __free_pages(sg_page(&scatterlist[j]), order);
1da177e4
LT
3766 kfree(sglist);
3767 return NULL;
3768 }
3769
642f1490 3770 sg_set_page(&scatterlist[i], page, 0, 0);
1da177e4
LT
3771 }
3772
3773 return sglist;
3774}
3775
3776/**
3777 * ipr_free_ucode_buffer - Frees a microcode download buffer
3778 * @p_dnld: scatter/gather list pointer
3779 *
3780 * Free a DMA'able ucode download buffer previously allocated with
3781 * ipr_alloc_ucode_buffer
3782 *
3783 * Return value:
3784 * nothing
3785 **/
3786static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3787{
3788 int i;
3789
3790 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3791 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
1da177e4
LT
3792
3793 kfree(sglist);
3794}
3795
3796/**
3797 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3798 * @sglist: scatter/gather list pointer
3799 * @buffer: buffer pointer
3800 * @len: buffer length
3801 *
3802 * Copy a microcode image from a user buffer into a buffer allocated by
3803 * ipr_alloc_ucode_buffer
3804 *
3805 * Return value:
3806 * 0 on success / other on failure
3807 **/
3808static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3809 u8 *buffer, u32 len)
3810{
3811 int bsize_elem, i, result = 0;
3812 struct scatterlist *scatterlist;
3813 void *kaddr;
3814
3815 /* Determine the actual number of bytes per element */
3816 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3817
3818 scatterlist = sglist->scatterlist;
3819
3820 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
45711f1a
JA
3821 struct page *page = sg_page(&scatterlist[i]);
3822
3823 kaddr = kmap(page);
1da177e4 3824 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3825 kunmap(page);
1da177e4
LT
3826
3827 scatterlist[i].length = bsize_elem;
3828
3829 if (result != 0) {
3830 ipr_trace;
3831 return result;
3832 }
3833 }
3834
3835 if (len % bsize_elem) {
45711f1a
JA
3836 struct page *page = sg_page(&scatterlist[i]);
3837
3838 kaddr = kmap(page);
1da177e4 3839 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3840 kunmap(page);
1da177e4
LT
3841
3842 scatterlist[i].length = len % bsize_elem;
3843 }
3844
3845 sglist->buffer_len = len;
3846 return result;
3847}
3848
a32c055f
WB
3849/**
3850 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3851 * @ipr_cmd: ipr command struct
3852 * @sglist: scatter/gather list
3853 *
3854 * Builds a microcode download IOA data list (IOADL).
3855 *
3856 **/
3857static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3858 struct ipr_sglist *sglist)
3859{
3860 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3861 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3862 struct scatterlist *scatterlist = sglist->scatterlist;
3863 int i;
3864
3865 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3866 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3867 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3868
3869 ioarcb->ioadl_len =
3870 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3871 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3872 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3873 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3874 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3875 }
3876
3877 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3878}
3879
1da177e4 3880/**
12baa420 3881 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
3882 * @ipr_cmd: ipr command struct
3883 * @sglist: scatter/gather list
1da177e4 3884 *
12baa420 3885 * Builds a microcode download IOA data list (IOADL).
1da177e4 3886 *
1da177e4 3887 **/
12baa420
BK
3888static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3889 struct ipr_sglist *sglist)
1da177e4 3890{
1da177e4 3891 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3892 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4
LT
3893 struct scatterlist *scatterlist = sglist->scatterlist;
3894 int i;
3895
12baa420 3896 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3897 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
3898 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3899
3900 ioarcb->ioadl_len =
1da177e4
LT
3901 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3902
3903 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3904 ioadl[i].flags_and_data_len =
3905 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3906 ioadl[i].address =
3907 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3908 }
3909
12baa420
BK
3910 ioadl[i-1].flags_and_data_len |=
3911 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3912}
3913
3914/**
3915 * ipr_update_ioa_ucode - Update IOA's microcode
3916 * @ioa_cfg: ioa config struct
3917 * @sglist: scatter/gather list
3918 *
3919 * Initiate an adapter reset to update the IOA's microcode
3920 *
3921 * Return value:
3922 * 0 on success / -EIO on failure
3923 **/
3924static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3925 struct ipr_sglist *sglist)
3926{
3927 unsigned long lock_flags;
3928
3929 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3930 while (ioa_cfg->in_reset_reload) {
970ea294
BK
3931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3933 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3934 }
12baa420
BK
3935
3936 if (ioa_cfg->ucode_sglist) {
3937 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3938 dev_err(&ioa_cfg->pdev->dev,
3939 "Microcode download already in progress\n");
3940 return -EIO;
1da177e4 3941 }
12baa420 3942
d73341bf
AB
3943 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3944 sglist->scatterlist, sglist->num_sg,
3945 DMA_TO_DEVICE);
12baa420
BK
3946
3947 if (!sglist->num_dma_sg) {
3948 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3949 dev_err(&ioa_cfg->pdev->dev,
3950 "Failed to map microcode download buffer!\n");
1da177e4
LT
3951 return -EIO;
3952 }
3953
12baa420
BK
3954 ioa_cfg->ucode_sglist = sglist;
3955 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3958
3959 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3960 ioa_cfg->ucode_sglist = NULL;
3961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3962 return 0;
3963}
3964
3965/**
3966 * ipr_store_update_fw - Update the firmware on the adapter
ee959b00
TJ
3967 * @class_dev: device struct
3968 * @buf: buffer
3969 * @count: buffer size
1da177e4
LT
3970 *
3971 * This function will update the firmware on the adapter.
3972 *
3973 * Return value:
3974 * count on success / other on failure
3975 **/
ee959b00
TJ
3976static ssize_t ipr_store_update_fw(struct device *dev,
3977 struct device_attribute *attr,
3978 const char *buf, size_t count)
1da177e4 3979{
ee959b00 3980 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3981 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3982 struct ipr_ucode_image_header *image_hdr;
3983 const struct firmware *fw_entry;
3984 struct ipr_sglist *sglist;
1da177e4
LT
3985 char fname[100];
3986 char *src;
3987 int len, result, dnld_size;
3988
3989 if (!capable(CAP_SYS_ADMIN))
3990 return -EACCES;
3991
3992 len = snprintf(fname, 99, "%s", buf);
3993 fname[len-1] = '\0';
3994
203fa3fe 3995 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
1da177e4
LT
3996 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3997 return -EIO;
3998 }
3999
4000 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4001
1da177e4
LT
4002 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4003 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4004 sglist = ipr_alloc_ucode_buffer(dnld_size);
4005
4006 if (!sglist) {
4007 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4008 release_firmware(fw_entry);
4009 return -ENOMEM;
4010 }
4011
4012 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4013
4014 if (result) {
4015 dev_err(&ioa_cfg->pdev->dev,
4016 "Microcode buffer copy to DMA buffer failed\n");
12baa420 4017 goto out;
1da177e4
LT
4018 }
4019
14ed9cc7
WB
4020 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4021
12baa420 4022 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 4023
12baa420
BK
4024 if (!result)
4025 result = count;
4026out:
1da177e4
LT
4027 ipr_free_ucode_buffer(sglist);
4028 release_firmware(fw_entry);
12baa420 4029 return result;
1da177e4
LT
4030}
4031
ee959b00 4032static struct device_attribute ipr_update_fw_attr = {
1da177e4
LT
4033 .attr = {
4034 .name = "update_fw",
4035 .mode = S_IWUSR,
4036 },
4037 .store = ipr_store_update_fw
4038};
4039
75576bb9
WB
4040/**
4041 * ipr_show_fw_type - Show the adapter's firmware type.
4042 * @dev: class device struct
4043 * @buf: buffer
4044 *
4045 * Return value:
4046 * number of bytes printed to buffer
4047 **/
4048static ssize_t ipr_show_fw_type(struct device *dev,
4049 struct device_attribute *attr, char *buf)
4050{
4051 struct Scsi_Host *shost = class_to_shost(dev);
4052 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4053 unsigned long lock_flags = 0;
4054 int len;
4055
4056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4059 return len;
4060}
4061
4062static struct device_attribute ipr_ioa_fw_type_attr = {
4063 .attr = {
4064 .name = "fw_type",
4065 .mode = S_IRUGO,
4066 },
4067 .show = ipr_show_fw_type
4068};
4069
ee959b00 4070static struct device_attribute *ipr_ioa_attrs[] = {
1da177e4
LT
4071 &ipr_fw_version_attr,
4072 &ipr_log_level_attr,
4073 &ipr_diagnostics_attr,
f37eb54b 4074 &ipr_ioa_state_attr,
1da177e4
LT
4075 &ipr_ioa_reset_attr,
4076 &ipr_update_fw_attr,
75576bb9 4077 &ipr_ioa_fw_type_attr,
b53d124a 4078 &ipr_iopoll_weight_attr,
1da177e4
LT
4079 NULL,
4080};
4081
4082#ifdef CONFIG_SCSI_IPR_DUMP
4083/**
4084 * ipr_read_dump - Dump the adapter
2c3c8bea 4085 * @filp: open sysfs file
1da177e4 4086 * @kobj: kobject struct
91a69029 4087 * @bin_attr: bin_attribute struct
1da177e4
LT
4088 * @buf: buffer
4089 * @off: offset
4090 * @count: buffer size
4091 *
4092 * Return value:
4093 * number of bytes printed to buffer
4094 **/
2c3c8bea 4095static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4096 struct bin_attribute *bin_attr,
4097 char *buf, loff_t off, size_t count)
1da177e4 4098{
ee959b00 4099 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4100 struct Scsi_Host *shost = class_to_shost(cdev);
4101 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4102 struct ipr_dump *dump;
4103 unsigned long lock_flags = 0;
4104 char *src;
4d4dd706 4105 int len, sdt_end;
1da177e4
LT
4106 size_t rc = count;
4107
4108 if (!capable(CAP_SYS_ADMIN))
4109 return -EACCES;
4110
4111 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4112 dump = ioa_cfg->dump;
4113
4114 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4115 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4116 return 0;
4117 }
4118 kref_get(&dump->kref);
4119 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4120
4121 if (off > dump->driver_dump.hdr.len) {
4122 kref_put(&dump->kref, ipr_release_dump);
4123 return 0;
4124 }
4125
4126 if (off + count > dump->driver_dump.hdr.len) {
4127 count = dump->driver_dump.hdr.len - off;
4128 rc = count;
4129 }
4130
4131 if (count && off < sizeof(dump->driver_dump)) {
4132 if (off + count > sizeof(dump->driver_dump))
4133 len = sizeof(dump->driver_dump) - off;
4134 else
4135 len = count;
4136 src = (u8 *)&dump->driver_dump + off;
4137 memcpy(buf, src, len);
4138 buf += len;
4139 off += len;
4140 count -= len;
4141 }
4142
4143 off -= sizeof(dump->driver_dump);
4144
4d4dd706
KSS
4145 if (ioa_cfg->sis64)
4146 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4147 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4148 sizeof(struct ipr_sdt_entry));
4149 else
4150 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4151 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4152
4153 if (count && off < sdt_end) {
4154 if (off + count > sdt_end)
4155 len = sdt_end - off;
1da177e4
LT
4156 else
4157 len = count;
4158 src = (u8 *)&dump->ioa_dump + off;
4159 memcpy(buf, src, len);
4160 buf += len;
4161 off += len;
4162 count -= len;
4163 }
4164
4d4dd706 4165 off -= sdt_end;
1da177e4
LT
4166
4167 while (count) {
4168 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4169 len = PAGE_ALIGN(off) - off;
4170 else
4171 len = count;
4172 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4173 src += off & ~PAGE_MASK;
4174 memcpy(buf, src, len);
4175 buf += len;
4176 off += len;
4177 count -= len;
4178 }
4179
4180 kref_put(&dump->kref, ipr_release_dump);
4181 return rc;
4182}
4183
4184/**
4185 * ipr_alloc_dump - Prepare for adapter dump
4186 * @ioa_cfg: ioa config struct
4187 *
4188 * Return value:
4189 * 0 on success / other on failure
4190 **/
4191static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4192{
4193 struct ipr_dump *dump;
4d4dd706 4194 __be32 **ioa_data;
1da177e4
LT
4195 unsigned long lock_flags = 0;
4196
0bc42e35 4197 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
4198
4199 if (!dump) {
4200 ipr_err("Dump memory allocation failed\n");
4201 return -ENOMEM;
4202 }
4203
4d4dd706
KSS
4204 if (ioa_cfg->sis64)
4205 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4206 else
4207 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4208
4209 if (!ioa_data) {
4210 ipr_err("Dump memory allocation failed\n");
4211 kfree(dump);
4212 return -ENOMEM;
4213 }
4214
4215 dump->ioa_dump.ioa_data = ioa_data;
4216
1da177e4
LT
4217 kref_init(&dump->kref);
4218 dump->ioa_cfg = ioa_cfg;
4219
4220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4221
4222 if (INACTIVE != ioa_cfg->sdt_state) {
4223 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 4224 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
4225 kfree(dump);
4226 return 0;
4227 }
4228
4229 ioa_cfg->dump = dump;
4230 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
56d6aa33 4231 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
1da177e4
LT
4232 ioa_cfg->dump_taken = 1;
4233 schedule_work(&ioa_cfg->work_q);
4234 }
4235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4236
1da177e4
LT
4237 return 0;
4238}
4239
4240/**
4241 * ipr_free_dump - Free adapter dump memory
4242 * @ioa_cfg: ioa config struct
4243 *
4244 * Return value:
4245 * 0 on success / other on failure
4246 **/
4247static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4248{
4249 struct ipr_dump *dump;
4250 unsigned long lock_flags = 0;
4251
4252 ENTER;
4253
4254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4255 dump = ioa_cfg->dump;
4256 if (!dump) {
4257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4258 return 0;
4259 }
4260
4261 ioa_cfg->dump = NULL;
4262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4263
4264 kref_put(&dump->kref, ipr_release_dump);
4265
4266 LEAVE;
4267 return 0;
4268}
4269
4270/**
4271 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4272 * @filp: open sysfs file
1da177e4 4273 * @kobj: kobject struct
91a69029 4274 * @bin_attr: bin_attribute struct
1da177e4
LT
4275 * @buf: buffer
4276 * @off: offset
4277 * @count: buffer size
4278 *
4279 * Return value:
4280 * number of bytes printed to buffer
4281 **/
2c3c8bea 4282static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4283 struct bin_attribute *bin_attr,
4284 char *buf, loff_t off, size_t count)
1da177e4 4285{
ee959b00 4286 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4287 struct Scsi_Host *shost = class_to_shost(cdev);
4288 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4289 int rc;
4290
4291 if (!capable(CAP_SYS_ADMIN))
4292 return -EACCES;
4293
4294 if (buf[0] == '1')
4295 rc = ipr_alloc_dump(ioa_cfg);
4296 else if (buf[0] == '0')
4297 rc = ipr_free_dump(ioa_cfg);
4298 else
4299 return -EINVAL;
4300
4301 if (rc)
4302 return rc;
4303 else
4304 return count;
4305}
4306
4307static struct bin_attribute ipr_dump_attr = {
4308 .attr = {
4309 .name = "dump",
4310 .mode = S_IRUSR | S_IWUSR,
4311 },
4312 .size = 0,
4313 .read = ipr_read_dump,
4314 .write = ipr_write_dump
4315};
4316#else
4317static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4318#endif
4319
4320/**
4321 * ipr_change_queue_depth - Change the device's queue depth
4322 * @sdev: scsi device struct
4323 * @qdepth: depth to set
e881a172 4324 * @reason: calling context
1da177e4
LT
4325 *
4326 * Return value:
4327 * actual depth set
4328 **/
db5ed4df 4329static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
1da177e4 4330{
35a39691
BK
4331 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4332 struct ipr_resource_entry *res;
4333 unsigned long lock_flags = 0;
4334
4335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4336 res = (struct ipr_resource_entry *)sdev->hostdata;
4337
4338 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4339 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4341
db5ed4df 4342 scsi_change_queue_depth(sdev, qdepth);
1da177e4
LT
4343 return sdev->queue_depth;
4344}
4345
1da177e4
LT
4346/**
4347 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4348 * @dev: device struct
46d74563 4349 * @attr: device attribute structure
1da177e4
LT
4350 * @buf: buffer
4351 *
4352 * Return value:
4353 * number of bytes printed to buffer
4354 **/
10523b3b 4355static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
4356{
4357 struct scsi_device *sdev = to_scsi_device(dev);
4358 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359 struct ipr_resource_entry *res;
4360 unsigned long lock_flags = 0;
4361 ssize_t len = -ENXIO;
4362
4363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364 res = (struct ipr_resource_entry *)sdev->hostdata;
4365 if (res)
3e7ebdfa 4366 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4367 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4368 return len;
4369}
4370
4371static struct device_attribute ipr_adapter_handle_attr = {
4372 .attr = {
4373 .name = "adapter_handle",
4374 .mode = S_IRUSR,
4375 },
4376 .show = ipr_show_adapter_handle
4377};
4378
3e7ebdfa 4379/**
5adcbeb3
WB
4380 * ipr_show_resource_path - Show the resource path or the resource address for
4381 * this device.
3e7ebdfa 4382 * @dev: device struct
46d74563 4383 * @attr: device attribute structure
3e7ebdfa
WB
4384 * @buf: buffer
4385 *
4386 * Return value:
4387 * number of bytes printed to buffer
4388 **/
4389static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4390{
4391 struct scsi_device *sdev = to_scsi_device(dev);
4392 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4393 struct ipr_resource_entry *res;
4394 unsigned long lock_flags = 0;
4395 ssize_t len = -ENXIO;
4396 char buffer[IPR_MAX_RES_PATH_LENGTH];
4397
4398 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4399 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4400 if (res && ioa_cfg->sis64)
3e7ebdfa 4401 len = snprintf(buf, PAGE_SIZE, "%s\n",
b3b3b407
BK
4402 __ipr_format_res_path(res->res_path, buffer,
4403 sizeof(buffer)));
5adcbeb3
WB
4404 else if (res)
4405 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4406 res->bus, res->target, res->lun);
4407
3e7ebdfa
WB
4408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4409 return len;
4410}
4411
4412static struct device_attribute ipr_resource_path_attr = {
4413 .attr = {
4414 .name = "resource_path",
75576bb9 4415 .mode = S_IRUGO,
3e7ebdfa
WB
4416 },
4417 .show = ipr_show_resource_path
4418};
4419
46d74563
WB
4420/**
4421 * ipr_show_device_id - Show the device_id for this device.
4422 * @dev: device struct
4423 * @attr: device attribute structure
4424 * @buf: buffer
4425 *
4426 * Return value:
4427 * number of bytes printed to buffer
4428 **/
4429static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4430{
4431 struct scsi_device *sdev = to_scsi_device(dev);
4432 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4433 struct ipr_resource_entry *res;
4434 unsigned long lock_flags = 0;
4435 ssize_t len = -ENXIO;
4436
4437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4438 res = (struct ipr_resource_entry *)sdev->hostdata;
4439 if (res && ioa_cfg->sis64)
4440 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4441 else if (res)
4442 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4443
4444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4445 return len;
4446}
4447
4448static struct device_attribute ipr_device_id_attr = {
4449 .attr = {
4450 .name = "device_id",
4451 .mode = S_IRUGO,
4452 },
4453 .show = ipr_show_device_id
4454};
4455
75576bb9
WB
4456/**
4457 * ipr_show_resource_type - Show the resource type for this device.
4458 * @dev: device struct
46d74563 4459 * @attr: device attribute structure
75576bb9
WB
4460 * @buf: buffer
4461 *
4462 * Return value:
4463 * number of bytes printed to buffer
4464 **/
4465static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4466{
4467 struct scsi_device *sdev = to_scsi_device(dev);
4468 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4469 struct ipr_resource_entry *res;
4470 unsigned long lock_flags = 0;
4471 ssize_t len = -ENXIO;
4472
4473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4474 res = (struct ipr_resource_entry *)sdev->hostdata;
4475
4476 if (res)
4477 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4478
4479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4480 return len;
4481}
4482
4483static struct device_attribute ipr_resource_type_attr = {
4484 .attr = {
4485 .name = "resource_type",
4486 .mode = S_IRUGO,
4487 },
4488 .show = ipr_show_resource_type
4489};
4490
1da177e4
LT
4491static struct device_attribute *ipr_dev_attrs[] = {
4492 &ipr_adapter_handle_attr,
3e7ebdfa 4493 &ipr_resource_path_attr,
46d74563 4494 &ipr_device_id_attr,
75576bb9 4495 &ipr_resource_type_attr,
1da177e4
LT
4496 NULL,
4497};
4498
4499/**
4500 * ipr_biosparam - Return the HSC mapping
4501 * @sdev: scsi device struct
4502 * @block_device: block device pointer
4503 * @capacity: capacity of the device
4504 * @parm: Array containing returned HSC values.
4505 *
4506 * This function generates the HSC parms that fdisk uses.
4507 * We want to make sure we return something that places partitions
4508 * on 4k boundaries for best performance with the IOA.
4509 *
4510 * Return value:
4511 * 0 on success
4512 **/
4513static int ipr_biosparam(struct scsi_device *sdev,
4514 struct block_device *block_device,
4515 sector_t capacity, int *parm)
4516{
4517 int heads, sectors;
4518 sector_t cylinders;
4519
4520 heads = 128;
4521 sectors = 32;
4522
4523 cylinders = capacity;
4524 sector_div(cylinders, (128 * 32));
4525
4526 /* return result */
4527 parm[0] = heads;
4528 parm[1] = sectors;
4529 parm[2] = cylinders;
4530
4531 return 0;
4532}
4533
35a39691
BK
4534/**
4535 * ipr_find_starget - Find target based on bus/target.
4536 * @starget: scsi target struct
4537 *
4538 * Return value:
4539 * resource entry pointer if found / NULL if not found
4540 **/
4541static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4542{
4543 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4545 struct ipr_resource_entry *res;
4546
4547 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4548 if ((res->bus == starget->channel) &&
0ee1d714 4549 (res->target == starget->id)) {
35a39691
BK
4550 return res;
4551 }
4552 }
4553
4554 return NULL;
4555}
4556
4557static struct ata_port_info sata_port_info;
4558
4559/**
4560 * ipr_target_alloc - Prepare for commands to a SCSI target
4561 * @starget: scsi target struct
4562 *
4563 * If the device is a SATA device, this function allocates an
4564 * ATA port with libata, else it does nothing.
4565 *
4566 * Return value:
4567 * 0 on success / non-0 on failure
4568 **/
4569static int ipr_target_alloc(struct scsi_target *starget)
4570{
4571 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4572 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4573 struct ipr_sata_port *sata_port;
4574 struct ata_port *ap;
4575 struct ipr_resource_entry *res;
4576 unsigned long lock_flags;
4577
4578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4579 res = ipr_find_starget(starget);
4580 starget->hostdata = NULL;
4581
4582 if (res && ipr_is_gata(res)) {
4583 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4584 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4585 if (!sata_port)
4586 return -ENOMEM;
4587
4588 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4589 if (ap) {
4590 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4591 sata_port->ioa_cfg = ioa_cfg;
4592 sata_port->ap = ap;
4593 sata_port->res = res;
4594
4595 res->sata_port = sata_port;
4596 ap->private_data = sata_port;
4597 starget->hostdata = sata_port;
4598 } else {
4599 kfree(sata_port);
4600 return -ENOMEM;
4601 }
4602 }
4603 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604
4605 return 0;
4606}
4607
4608/**
4609 * ipr_target_destroy - Destroy a SCSI target
4610 * @starget: scsi target struct
4611 *
4612 * If the device was a SATA device, this function frees the libata
4613 * ATA port, else it does nothing.
4614 *
4615 **/
4616static void ipr_target_destroy(struct scsi_target *starget)
4617{
4618 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4619 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4620 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4621
4622 if (ioa_cfg->sis64) {
0ee1d714
BK
4623 if (!ipr_find_starget(starget)) {
4624 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4625 clear_bit(starget->id, ioa_cfg->array_ids);
4626 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4627 clear_bit(starget->id, ioa_cfg->vset_ids);
4628 else if (starget->channel == 0)
4629 clear_bit(starget->id, ioa_cfg->target_ids);
4630 }
3e7ebdfa 4631 }
35a39691
BK
4632
4633 if (sata_port) {
4634 starget->hostdata = NULL;
4635 ata_sas_port_destroy(sata_port->ap);
4636 kfree(sata_port);
4637 }
4638}
4639
4640/**
4641 * ipr_find_sdev - Find device based on bus/target/lun.
4642 * @sdev: scsi device struct
4643 *
4644 * Return value:
4645 * resource entry pointer if found / NULL if not found
4646 **/
4647static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4648{
4649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4650 struct ipr_resource_entry *res;
4651
4652 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4653 if ((res->bus == sdev->channel) &&
4654 (res->target == sdev->id) &&
4655 (res->lun == sdev->lun))
35a39691
BK
4656 return res;
4657 }
4658
4659 return NULL;
4660}
4661
1da177e4
LT
4662/**
4663 * ipr_slave_destroy - Unconfigure a SCSI device
4664 * @sdev: scsi device struct
4665 *
4666 * Return value:
4667 * nothing
4668 **/
4669static void ipr_slave_destroy(struct scsi_device *sdev)
4670{
4671 struct ipr_resource_entry *res;
4672 struct ipr_ioa_cfg *ioa_cfg;
4673 unsigned long lock_flags = 0;
4674
4675 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4676
4677 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4678 res = (struct ipr_resource_entry *) sdev->hostdata;
4679 if (res) {
35a39691 4680 if (res->sata_port)
3e4ec344 4681 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4682 sdev->hostdata = NULL;
4683 res->sdev = NULL;
35a39691 4684 res->sata_port = NULL;
1da177e4
LT
4685 }
4686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4687}
4688
4689/**
4690 * ipr_slave_configure - Configure a SCSI device
4691 * @sdev: scsi device struct
4692 *
4693 * This function configures the specified scsi device.
4694 *
4695 * Return value:
4696 * 0 on success
4697 **/
4698static int ipr_slave_configure(struct scsi_device *sdev)
4699{
4700 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4701 struct ipr_resource_entry *res;
dd406ef8 4702 struct ata_port *ap = NULL;
1da177e4 4703 unsigned long lock_flags = 0;
3e7ebdfa 4704 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4705
4706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4707 res = sdev->hostdata;
4708 if (res) {
4709 if (ipr_is_af_dasd_device(res))
4710 sdev->type = TYPE_RAID;
0726ce26 4711 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4712 sdev->scsi_level = 4;
0726ce26
BK
4713 sdev->no_uld_attach = 1;
4714 }
1da177e4 4715 if (ipr_is_vset_device(res)) {
60654e25 4716 sdev->scsi_level = SCSI_SPC_3;
242f9dcb
JA
4717 blk_queue_rq_timeout(sdev->request_queue,
4718 IPR_VSET_RW_TIMEOUT);
086fa5ff 4719 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4720 }
dd406ef8
BK
4721 if (ipr_is_gata(res) && res->sata_port)
4722 ap = res->sata_port->ap;
4723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4724
4725 if (ap) {
db5ed4df 4726 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8 4727 ata_sas_slave_configure(sdev, ap);
c8b09f6f
CH
4728 }
4729
3e7ebdfa
WB
4730 if (ioa_cfg->sis64)
4731 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
b3b3b407
BK
4732 ipr_format_res_path(ioa_cfg,
4733 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4734 return 0;
1da177e4
LT
4735 }
4736 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4737 return 0;
4738}
4739
35a39691
BK
4740/**
4741 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4742 * @sdev: scsi device struct
4743 *
4744 * This function initializes an ATA port so that future commands
4745 * sent through queuecommand will work.
4746 *
4747 * Return value:
4748 * 0 on success
4749 **/
4750static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4751{
4752 struct ipr_sata_port *sata_port = NULL;
4753 int rc = -ENXIO;
4754
4755 ENTER;
4756 if (sdev->sdev_target)
4757 sata_port = sdev->sdev_target->hostdata;
b2024459 4758 if (sata_port) {
35a39691 4759 rc = ata_sas_port_init(sata_port->ap);
b2024459
DW
4760 if (rc == 0)
4761 rc = ata_sas_sync_probe(sata_port->ap);
4762 }
4763
35a39691
BK
4764 if (rc)
4765 ipr_slave_destroy(sdev);
4766
4767 LEAVE;
4768 return rc;
4769}
4770
1da177e4
LT
4771/**
4772 * ipr_slave_alloc - Prepare for commands to a device.
4773 * @sdev: scsi device struct
4774 *
4775 * This function saves a pointer to the resource entry
4776 * in the scsi device struct if the device exists. We
4777 * can then use this pointer in ipr_queuecommand when
4778 * handling new commands.
4779 *
4780 * Return value:
692aebfc 4781 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4782 **/
4783static int ipr_slave_alloc(struct scsi_device *sdev)
4784{
4785 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4786 struct ipr_resource_entry *res;
4787 unsigned long lock_flags;
692aebfc 4788 int rc = -ENXIO;
1da177e4
LT
4789
4790 sdev->hostdata = NULL;
4791
4792 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4793
35a39691
BK
4794 res = ipr_find_sdev(sdev);
4795 if (res) {
4796 res->sdev = sdev;
4797 res->add_to_ml = 0;
4798 res->in_erp = 0;
4799 sdev->hostdata = res;
4800 if (!ipr_is_naca_model(res))
4801 res->needs_sync_complete = 1;
4802 rc = 0;
4803 if (ipr_is_gata(res)) {
4804 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4805 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
4806 }
4807 }
4808
4809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4810
692aebfc 4811 return rc;
1da177e4
LT
4812}
4813
70233ac5 4814static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
1da177e4
LT
4815{
4816 struct ipr_ioa_cfg *ioa_cfg;
70233ac5 4817 unsigned long lock_flags = 0;
4818 int rc = SUCCESS;
1da177e4
LT
4819
4820 ENTER;
70233ac5 4821 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 4823
96b04db9 4824 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
70233ac5 4825 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
a92fa25c
KSS
4826 dev_err(&ioa_cfg->pdev->dev,
4827 "Adapter being reset as a result of error recovery.\n");
1da177e4 4828
a92fa25c
KSS
4829 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4830 ioa_cfg->sdt_state = GET_DUMP;
4831 }
1da177e4 4832
70233ac5 4833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4834 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4835 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
df0ae249 4836
70233ac5 4837 /* If we got hit with a host reset while we were already resetting
4838 the adapter for some reason, and the reset failed. */
4839 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4840 ipr_trace;
4841 rc = FAILED;
4842 }
df0ae249 4843
70233ac5 4844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4845 LEAVE;
df0ae249
JG
4846 return rc;
4847}
4848
c6513096
BK
4849/**
4850 * ipr_device_reset - Reset the device
4851 * @ioa_cfg: ioa config struct
4852 * @res: resource entry struct
4853 *
4854 * This function issues a device reset to the affected device.
4855 * If the device is a SCSI device, a LUN reset will be sent
4856 * to the device first. If that does not work, a target reset
35a39691
BK
4857 * will be sent. If the device is a SATA device, a PHY reset will
4858 * be sent.
c6513096
BK
4859 *
4860 * Return value:
4861 * 0 on success / non-zero on failure
4862 **/
4863static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4864 struct ipr_resource_entry *res)
4865{
4866 struct ipr_cmnd *ipr_cmd;
4867 struct ipr_ioarcb *ioarcb;
4868 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4869 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
4870 u32 ioasc;
4871
4872 ENTER;
4873 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4874 ioarcb = &ipr_cmd->ioarcb;
4875 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
4876
4877 if (ipr_cmd->ioa_cfg->sis64) {
4878 regs = &ipr_cmd->i.ata_ioadl.regs;
4879 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4880 } else
4881 regs = &ioarcb->u.add_data.u.regs;
c6513096 4882
3e7ebdfa 4883 ioarcb->res_handle = res->res_handle;
c6513096
BK
4884 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4885 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
4886 if (ipr_is_gata(res)) {
4887 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4888 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
4889 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4890 }
c6513096
BK
4891
4892 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 4893 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 4894 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
96d21f00
WB
4895 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4896 if (ipr_cmd->ioa_cfg->sis64)
4897 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4898 sizeof(struct ipr_ioasa_gata));
4899 else
4900 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4901 sizeof(struct ipr_ioasa_gata));
4902 }
c6513096
BK
4903
4904 LEAVE;
203fa3fe 4905 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
c6513096
BK
4906}
4907
35a39691
BK
4908/**
4909 * ipr_sata_reset - Reset the SATA port
cc0680a5 4910 * @link: SATA link to reset
35a39691
BK
4911 * @classes: class of the attached device
4912 *
cc0680a5 4913 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
4914 *
4915 * Return value:
4916 * 0 on success / non-zero on failure
4917 **/
cc0680a5 4918static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 4919 unsigned long deadline)
35a39691 4920{
cc0680a5 4921 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
4922 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4923 struct ipr_resource_entry *res;
4924 unsigned long lock_flags = 0;
4925 int rc = -ENXIO;
4926
4927 ENTER;
4928 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 4929 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
4930 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4931 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4932 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4933 }
4934
35a39691
BK
4935 res = sata_port->res;
4936 if (res) {
4937 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 4938 *classes = res->ata_class;
35a39691
BK
4939 }
4940
4941 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4942 LEAVE;
4943 return rc;
4944}
4945
1da177e4
LT
4946/**
4947 * ipr_eh_dev_reset - Reset the device
4948 * @scsi_cmd: scsi command struct
4949 *
4950 * This function issues a device reset to the affected device.
4951 * A LUN reset will be sent to the device first. If that does
4952 * not work, a target reset will be sent.
4953 *
4954 * Return value:
4955 * SUCCESS / FAILED
4956 **/
203fa3fe 4957static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
4958{
4959 struct ipr_cmnd *ipr_cmd;
4960 struct ipr_ioa_cfg *ioa_cfg;
4961 struct ipr_resource_entry *res;
35a39691
BK
4962 struct ata_port *ap;
4963 int rc = 0;
05a6538a 4964 struct ipr_hrr_queue *hrrq;
1da177e4
LT
4965
4966 ENTER;
4967 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4968 res = scsi_cmd->device->hostdata;
4969
eeb88307 4970 if (!res)
1da177e4
LT
4971 return FAILED;
4972
4973 /*
4974 * If we are currently going through reset/reload, return failed. This will force the
4975 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4976 * reset to complete
4977 */
4978 if (ioa_cfg->in_reset_reload)
4979 return FAILED;
56d6aa33 4980 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
4981 return FAILED;
4982
05a6538a 4983 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 4984 spin_lock(&hrrq->_lock);
05a6538a 4985 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4986 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4987 if (ipr_cmd->scsi_cmd)
4988 ipr_cmd->done = ipr_scsi_eh_done;
4989 if (ipr_cmd->qc)
4990 ipr_cmd->done = ipr_sata_eh_done;
4991 if (ipr_cmd->qc &&
4992 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4993 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4994 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4995 }
7402ecef 4996 }
1da177e4 4997 }
56d6aa33 4998 spin_unlock(&hrrq->_lock);
1da177e4 4999 }
1da177e4 5000 res->resetting_device = 1;
fb3ed3cb 5001 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
5002
5003 if (ipr_is_gata(res) && res->sata_port) {
5004 ap = res->sata_port->ap;
5005 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 5006 ata_std_error_handler(ap);
35a39691 5007 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26 5008
05a6538a 5009 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5010 spin_lock(&hrrq->_lock);
05a6538a 5011 list_for_each_entry(ipr_cmd,
5012 &hrrq->hrrq_pending_q, queue) {
5013 if (ipr_cmd->ioarcb.res_handle ==
5014 res->res_handle) {
5015 rc = -EIO;
5016 break;
5017 }
5af23d26 5018 }
56d6aa33 5019 spin_unlock(&hrrq->_lock);
5af23d26 5020 }
35a39691
BK
5021 } else
5022 rc = ipr_device_reset(ioa_cfg, res);
1da177e4 5023 res->resetting_device = 0;
0b1f8d44 5024 res->reset_occurred = 1;
1da177e4 5025
1da177e4 5026 LEAVE;
203fa3fe 5027 return rc ? FAILED : SUCCESS;
1da177e4
LT
5028}
5029
203fa3fe 5030static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
94d0e7b8
JG
5031{
5032 int rc;
5033
5034 spin_lock_irq(cmd->device->host->host_lock);
5035 rc = __ipr_eh_dev_reset(cmd);
5036 spin_unlock_irq(cmd->device->host->host_lock);
5037
5038 return rc;
5039}
5040
1da177e4
LT
5041/**
5042 * ipr_bus_reset_done - Op done function for bus reset.
5043 * @ipr_cmd: ipr command struct
5044 *
5045 * This function is the op done function for a bus reset
5046 *
5047 * Return value:
5048 * none
5049 **/
5050static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5051{
5052 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5053 struct ipr_resource_entry *res;
5054
5055 ENTER;
3e7ebdfa
WB
5056 if (!ioa_cfg->sis64)
5057 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5058 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5059 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5060 break;
5061 }
1da177e4 5062 }
1da177e4
LT
5063
5064 /*
5065 * If abort has not completed, indicate the reset has, else call the
5066 * abort's done function to wake the sleeping eh thread
5067 */
5068 if (ipr_cmd->sibling->sibling)
5069 ipr_cmd->sibling->sibling = NULL;
5070 else
5071 ipr_cmd->sibling->done(ipr_cmd->sibling);
5072
05a6538a 5073 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5074 LEAVE;
5075}
5076
5077/**
5078 * ipr_abort_timeout - An abort task has timed out
5079 * @ipr_cmd: ipr command struct
5080 *
5081 * This function handles when an abort task times out. If this
5082 * happens we issue a bus reset since we have resources tied
5083 * up that must be freed before returning to the midlayer.
5084 *
5085 * Return value:
5086 * none
5087 **/
5088static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5089{
5090 struct ipr_cmnd *reset_cmd;
5091 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5092 struct ipr_cmd_pkt *cmd_pkt;
5093 unsigned long lock_flags = 0;
5094
5095 ENTER;
5096 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5097 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5098 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5099 return;
5100 }
5101
fb3ed3cb 5102 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
5103 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5104 ipr_cmd->sibling = reset_cmd;
5105 reset_cmd->sibling = ipr_cmd;
5106 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5107 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5108 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5109 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5110 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5111
5112 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5113 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5114 LEAVE;
5115}
5116
5117/**
5118 * ipr_cancel_op - Cancel specified op
5119 * @scsi_cmd: scsi command struct
5120 *
5121 * This function cancels specified op.
5122 *
5123 * Return value:
5124 * SUCCESS / FAILED
5125 **/
203fa3fe 5126static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5127{
5128 struct ipr_cmnd *ipr_cmd;
5129 struct ipr_ioa_cfg *ioa_cfg;
5130 struct ipr_resource_entry *res;
5131 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5132 u32 ioasc, int_reg;
1da177e4 5133 int op_found = 0;
05a6538a 5134 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5135
5136 ENTER;
5137 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5138 res = scsi_cmd->device->hostdata;
5139
8fa728a2
JG
5140 /* If we are currently going through reset/reload, return failed.
5141 * This will force the mid-layer to call ipr_eh_host_reset,
5142 * which will then go to sleep and wait for the reset to complete
5143 */
56d6aa33 5144 if (ioa_cfg->in_reset_reload ||
5145 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5146 return FAILED;
a92fa25c
KSS
5147 if (!res)
5148 return FAILED;
5149
5150 /*
5151 * If we are aborting a timed out op, chances are that the timeout was caused
5152 * by a still not detected EEH error. In such cases, reading a register will
5153 * trigger the EEH recovery infrastructure.
5154 */
5155 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5156
5157 if (!ipr_is_gscsi(res))
1da177e4
LT
5158 return FAILED;
5159
05a6538a 5160 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5161 spin_lock(&hrrq->_lock);
05a6538a 5162 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5163 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5164 ipr_cmd->done = ipr_scsi_eh_done;
5165 op_found = 1;
5166 break;
5167 }
1da177e4 5168 }
56d6aa33 5169 spin_unlock(&hrrq->_lock);
1da177e4
LT
5170 }
5171
5172 if (!op_found)
5173 return SUCCESS;
5174
5175 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5176 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
5177 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5178 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5179 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5180 ipr_cmd->u.sdev = scsi_cmd->device;
5181
fb3ed3cb
BK
5182 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5183 scsi_cmd->cmnd[0]);
1da177e4 5184 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5185 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5186
5187 /*
5188 * If the abort task timed out and we sent a bus reset, we will get
5189 * one the following responses to the abort
5190 */
5191 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5192 ioasc = 0;
5193 ipr_trace;
5194 }
5195
c4ee22a3 5196 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ee0a90fa
BK
5197 if (!ipr_is_naca_model(res))
5198 res->needs_sync_complete = 1;
1da177e4
LT
5199
5200 LEAVE;
203fa3fe 5201 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
1da177e4
LT
5202}
5203
5204/**
5205 * ipr_eh_abort - Abort a single op
5206 * @scsi_cmd: scsi command struct
5207 *
5208 * Return value:
f688f96d
BK
5209 * 0 if scan in progress / 1 if scan is complete
5210 **/
5211static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5212{
5213 unsigned long lock_flags;
5214 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5215 int rc = 0;
5216
5217 spin_lock_irqsave(shost->host_lock, lock_flags);
5218 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5219 rc = 1;
5220 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5221 rc = 1;
5222 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5223 return rc;
5224}
5225
5226/**
5227 * ipr_eh_host_reset - Reset the host adapter
5228 * @scsi_cmd: scsi command struct
5229 *
5230 * Return value:
1da177e4
LT
5231 * SUCCESS / FAILED
5232 **/
203fa3fe 5233static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5234{
8fa728a2
JG
5235 unsigned long flags;
5236 int rc;
1da177e4
LT
5237
5238 ENTER;
1da177e4 5239
8fa728a2
JG
5240 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5241 rc = ipr_cancel_op(scsi_cmd);
5242 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
5243
5244 LEAVE;
8fa728a2 5245 return rc;
1da177e4
LT
5246}
5247
5248/**
5249 * ipr_handle_other_interrupt - Handle "other" interrupts
5250 * @ioa_cfg: ioa config struct
634651fa 5251 * @int_reg: interrupt register
1da177e4
LT
5252 *
5253 * Return value:
5254 * IRQ_NONE / IRQ_HANDLED
5255 **/
634651fa 5256static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5257 u32 int_reg)
1da177e4
LT
5258{
5259 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5260 u32 int_mask_reg;
56d6aa33 5261
7dacb64f
WB
5262 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5263 int_reg &= ~int_mask_reg;
5264
5265 /* If an interrupt on the adapter did not occur, ignore it.
5266 * Or in the case of SIS 64, check for a stage change interrupt.
5267 */
5268 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5269 if (ioa_cfg->sis64) {
5270 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5271 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5272 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5273
5274 /* clear stage change */
5275 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5276 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5277 list_del(&ioa_cfg->reset_cmd->queue);
5278 del_timer(&ioa_cfg->reset_cmd->timer);
5279 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5280 return IRQ_HANDLED;
5281 }
5282 }
5283
5284 return IRQ_NONE;
5285 }
1da177e4
LT
5286
5287 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5288 /* Mask the interrupt */
5289 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5290
5291 /* Clear the interrupt */
5292 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5293 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5294
5295 list_del(&ioa_cfg->reset_cmd->queue);
5296 del_timer(&ioa_cfg->reset_cmd->timer);
5297 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5298 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
7dd21308
BK
5299 if (ioa_cfg->clear_isr) {
5300 if (ipr_debug && printk_ratelimit())
5301 dev_err(&ioa_cfg->pdev->dev,
5302 "Spurious interrupt detected. 0x%08X\n", int_reg);
5303 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5304 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5305 return IRQ_NONE;
5306 }
1da177e4
LT
5307 } else {
5308 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5309 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5310 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5311 dev_err(&ioa_cfg->pdev->dev,
5312 "No Host RRQ. 0x%08X\n", int_reg);
1da177e4
LT
5313 else
5314 dev_err(&ioa_cfg->pdev->dev,
5315 "Permanent IOA failure. 0x%08X\n", int_reg);
5316
5317 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5318 ioa_cfg->sdt_state = GET_DUMP;
5319
5320 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5321 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5322 }
56d6aa33 5323
1da177e4
LT
5324 return rc;
5325}
5326
3feeb89d
WB
5327/**
5328 * ipr_isr_eh - Interrupt service routine error handler
5329 * @ioa_cfg: ioa config struct
5330 * @msg: message to log
5331 *
5332 * Return value:
5333 * none
5334 **/
05a6538a 5335static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
3feeb89d
WB
5336{
5337 ioa_cfg->errors_logged++;
05a6538a 5338 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
3feeb89d
WB
5339
5340 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5341 ioa_cfg->sdt_state = GET_DUMP;
5342
5343 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5344}
5345
b53d124a 5346static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
05a6538a 5347 struct list_head *doneq)
5348{
5349 u32 ioasc;
5350 u16 cmd_index;
5351 struct ipr_cmnd *ipr_cmd;
5352 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5353 int num_hrrq = 0;
5354
5355 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5356 if (!hrr_queue->allow_interrupts)
05a6538a 5357 return 0;
5358
5359 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5360 hrr_queue->toggle_bit) {
5361
5362 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5363 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5364 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5365
5366 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5367 cmd_index < hrr_queue->min_cmd_id)) {
5368 ipr_isr_eh(ioa_cfg,
5369 "Invalid response handle from IOA: ",
5370 cmd_index);
5371 break;
5372 }
5373
5374 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5375 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5376
5377 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5378
5379 list_move_tail(&ipr_cmd->queue, doneq);
5380
5381 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5382 hrr_queue->hrrq_curr++;
5383 } else {
5384 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5385 hrr_queue->toggle_bit ^= 1u;
5386 }
5387 num_hrrq++;
b53d124a 5388 if (budget > 0 && num_hrrq >= budget)
5389 break;
05a6538a 5390 }
b53d124a 5391
05a6538a 5392 return num_hrrq;
5393}
b53d124a 5394
5395static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5396{
5397 struct ipr_ioa_cfg *ioa_cfg;
5398 struct ipr_hrr_queue *hrrq;
5399 struct ipr_cmnd *ipr_cmd, *temp;
5400 unsigned long hrrq_flags;
5401 int completed_ops;
5402 LIST_HEAD(doneq);
5403
5404 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5405 ioa_cfg = hrrq->ioa_cfg;
5406
5407 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5408 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5409
5410 if (completed_ops < budget)
5411 blk_iopoll_complete(iop);
5412 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5413
5414 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5415 list_del(&ipr_cmd->queue);
5416 del_timer(&ipr_cmd->timer);
5417 ipr_cmd->fast_done(ipr_cmd);
5418 }
5419
5420 return completed_ops;
5421}
5422
1da177e4
LT
5423/**
5424 * ipr_isr - Interrupt service routine
5425 * @irq: irq number
5426 * @devp: pointer to ioa config struct
1da177e4
LT
5427 *
5428 * Return value:
5429 * IRQ_NONE / IRQ_HANDLED
5430 **/
7d12e780 5431static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5432{
05a6538a 5433 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5434 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5435 unsigned long hrrq_flags = 0;
7dacb64f 5436 u32 int_reg = 0;
3feeb89d 5437 int num_hrrq = 0;
7dacb64f 5438 int irq_none = 0;
172cd6e1 5439 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5440 irqreturn_t rc = IRQ_NONE;
172cd6e1 5441 LIST_HEAD(doneq);
1da177e4 5442
56d6aa33 5443 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5444 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5445 if (!hrrq->allow_interrupts) {
5446 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4
LT
5447 return IRQ_NONE;
5448 }
5449
1da177e4 5450 while (1) {
b53d124a 5451 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5452 rc = IRQ_HANDLED;
1da177e4 5453
b53d124a 5454 if (!ioa_cfg->clear_isr)
5455 break;
7dd21308 5456
1da177e4 5457 /* Clear the PCI interrupt */
a5442ba4 5458 num_hrrq = 0;
3feeb89d 5459 do {
b53d124a 5460 writel(IPR_PCII_HRRQ_UPDATED,
5461 ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5462 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d 5463 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
b53d124a 5464 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
3feeb89d 5465
7dacb64f
WB
5466 } else if (rc == IRQ_NONE && irq_none == 0) {
5467 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5468 irq_none++;
a5442ba4
WB
5469 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5470 int_reg & IPR_PCII_HRRQ_UPDATED) {
b53d124a 5471 ipr_isr_eh(ioa_cfg,
5472 "Error clearing HRRQ: ", num_hrrq);
172cd6e1 5473 rc = IRQ_HANDLED;
b53d124a 5474 break;
1da177e4
LT
5475 } else
5476 break;
5477 }
5478
5479 if (unlikely(rc == IRQ_NONE))
634651fa 5480 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5481
56d6aa33 5482 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
172cd6e1
BK
5483 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5484 list_del(&ipr_cmd->queue);
5485 del_timer(&ipr_cmd->timer);
5486 ipr_cmd->fast_done(ipr_cmd);
5487 }
05a6538a 5488 return rc;
5489}
5490
5491/**
5492 * ipr_isr_mhrrq - Interrupt service routine
5493 * @irq: irq number
5494 * @devp: pointer to ioa config struct
5495 *
5496 * Return value:
5497 * IRQ_NONE / IRQ_HANDLED
5498 **/
5499static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5500{
5501 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
b53d124a 5502 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5503 unsigned long hrrq_flags = 0;
05a6538a 5504 struct ipr_cmnd *ipr_cmd, *temp;
5505 irqreturn_t rc = IRQ_NONE;
5506 LIST_HEAD(doneq);
172cd6e1 5507
56d6aa33 5508 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5509
5510 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5511 if (!hrrq->allow_interrupts) {
5512 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5513 return IRQ_NONE;
5514 }
5515
89f8b33c 5516 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 5517 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5518 hrrq->toggle_bit) {
5519 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5520 blk_iopoll_sched(&hrrq->iopoll);
5521 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5522 return IRQ_HANDLED;
5523 }
5524 } else {
5525 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5526 hrrq->toggle_bit)
05a6538a 5527
b53d124a 5528 if (ipr_process_hrrq(hrrq, -1, &doneq))
5529 rc = IRQ_HANDLED;
5530 }
05a6538a 5531
56d6aa33 5532 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5533
5534 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5535 list_del(&ipr_cmd->queue);
5536 del_timer(&ipr_cmd->timer);
5537 ipr_cmd->fast_done(ipr_cmd);
5538 }
1da177e4
LT
5539 return rc;
5540}
5541
a32c055f
WB
5542/**
5543 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5544 * @ioa_cfg: ioa config struct
5545 * @ipr_cmd: ipr command struct
5546 *
5547 * Return value:
5548 * 0 on success / -1 on failure
5549 **/
5550static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5551 struct ipr_cmnd *ipr_cmd)
5552{
5553 int i, nseg;
5554 struct scatterlist *sg;
5555 u32 length;
5556 u32 ioadl_flags = 0;
5557 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5558 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5559 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5560
5561 length = scsi_bufflen(scsi_cmd);
5562 if (!length)
5563 return 0;
5564
5565 nseg = scsi_dma_map(scsi_cmd);
5566 if (nseg < 0) {
51f52a47 5567 if (printk_ratelimit())
d73341bf 5568 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
a32c055f
WB
5569 return -1;
5570 }
5571
5572 ipr_cmd->dma_use_sg = nseg;
5573
438b0331 5574 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5575 ioarcb->ioadl_len =
5576 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5577
a32c055f
WB
5578 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5579 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5580 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5581 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5582 ioadl_flags = IPR_IOADL_FLAGS_READ;
5583
5584 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5585 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5586 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5587 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5588 }
5589
5590 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5591 return 0;
5592}
5593
1da177e4
LT
5594/**
5595 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5596 * @ioa_cfg: ioa config struct
5597 * @ipr_cmd: ipr command struct
5598 *
5599 * Return value:
5600 * 0 on success / -1 on failure
5601 **/
5602static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5603 struct ipr_cmnd *ipr_cmd)
5604{
63015bc9
FT
5605 int i, nseg;
5606 struct scatterlist *sg;
1da177e4
LT
5607 u32 length;
5608 u32 ioadl_flags = 0;
5609 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5610 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5611 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5612
63015bc9
FT
5613 length = scsi_bufflen(scsi_cmd);
5614 if (!length)
1da177e4
LT
5615 return 0;
5616
63015bc9
FT
5617 nseg = scsi_dma_map(scsi_cmd);
5618 if (nseg < 0) {
d73341bf 5619 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
63015bc9
FT
5620 return -1;
5621 }
51b1c7e1 5622
63015bc9
FT
5623 ipr_cmd->dma_use_sg = nseg;
5624
5625 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5626 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5627 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5628 ioarcb->data_transfer_length = cpu_to_be32(length);
5629 ioarcb->ioadl_len =
63015bc9
FT
5630 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5631 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5632 ioadl_flags = IPR_IOADL_FLAGS_READ;
5633 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5634 ioarcb->read_ioadl_len =
5635 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5636 }
1da177e4 5637
a32c055f
WB
5638 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5639 ioadl = ioarcb->u.add_data.u.ioadl;
5640 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5641 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5642 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5643 }
1da177e4 5644
63015bc9
FT
5645 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5646 ioadl[i].flags_and_data_len =
5647 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5648 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5649 }
5650
63015bc9
FT
5651 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5652 return 0;
1da177e4
LT
5653}
5654
1da177e4
LT
5655/**
5656 * ipr_erp_done - Process completion of ERP for a device
5657 * @ipr_cmd: ipr command struct
5658 *
5659 * This function copies the sense buffer into the scsi_cmd
5660 * struct and pushes the scsi_done function.
5661 *
5662 * Return value:
5663 * nothing
5664 **/
5665static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5666{
5667 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5668 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5669 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5670
5671 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5672 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5673 scmd_printk(KERN_ERR, scsi_cmd,
5674 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5675 } else {
5676 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5677 SCSI_SENSE_BUFFERSIZE);
5678 }
5679
5680 if (res) {
ee0a90fa
BK
5681 if (!ipr_is_naca_model(res))
5682 res->needs_sync_complete = 1;
1da177e4
LT
5683 res->in_erp = 0;
5684 }
63015bc9 5685 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 5686 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5687 scsi_cmd->scsi_done(scsi_cmd);
5688}
5689
5690/**
5691 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5692 * @ipr_cmd: ipr command struct
5693 *
5694 * Return value:
5695 * none
5696 **/
5697static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5698{
51b1c7e1 5699 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5700 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5701 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5702
5703 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5704 ioarcb->data_transfer_length = 0;
1da177e4 5705 ioarcb->read_data_transfer_length = 0;
a32c055f 5706 ioarcb->ioadl_len = 0;
1da177e4 5707 ioarcb->read_ioadl_len = 0;
96d21f00
WB
5708 ioasa->hdr.ioasc = 0;
5709 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
5710
5711 if (ipr_cmd->ioa_cfg->sis64)
5712 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5713 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5714 else {
5715 ioarcb->write_ioadl_addr =
5716 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5717 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5718 }
1da177e4
LT
5719}
5720
5721/**
5722 * ipr_erp_request_sense - Send request sense to a device
5723 * @ipr_cmd: ipr command struct
5724 *
5725 * This function sends a request sense to a device as a result
5726 * of a check condition.
5727 *
5728 * Return value:
5729 * nothing
5730 **/
5731static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5732{
5733 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5734 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5735
5736 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5737 ipr_erp_done(ipr_cmd);
5738 return;
5739 }
5740
5741 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5742
5743 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5744 cmd_pkt->cdb[0] = REQUEST_SENSE;
5745 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5746 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5747 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5748 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5749
a32c055f
WB
5750 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5751 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5752
5753 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5754 IPR_REQUEST_SENSE_TIMEOUT * 2);
5755}
5756
5757/**
5758 * ipr_erp_cancel_all - Send cancel all to a device
5759 * @ipr_cmd: ipr command struct
5760 *
5761 * This function sends a cancel all to a device to clear the
5762 * queue. If we are running TCQ on the device, QERR is set to 1,
5763 * which means all outstanding ops have been dropped on the floor.
5764 * Cancel all will return them to us.
5765 *
5766 * Return value:
5767 * nothing
5768 **/
5769static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5770{
5771 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5772 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5773 struct ipr_cmd_pkt *cmd_pkt;
5774
5775 res->in_erp = 1;
5776
5777 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5778
17ea0126 5779 if (!scsi_cmd->device->simple_tags) {
1da177e4
LT
5780 ipr_erp_request_sense(ipr_cmd);
5781 return;
5782 }
5783
5784 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5785 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5786 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5787
5788 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5789 IPR_CANCEL_ALL_TIMEOUT);
5790}
5791
5792/**
5793 * ipr_dump_ioasa - Dump contents of IOASA
5794 * @ioa_cfg: ioa config struct
5795 * @ipr_cmd: ipr command struct
fe964d0a 5796 * @res: resource entry struct
1da177e4
LT
5797 *
5798 * This function is invoked by the interrupt handler when ops
5799 * fail. It will log the IOASA if appropriate. Only called
5800 * for GPDD ops.
5801 *
5802 * Return value:
5803 * none
5804 **/
5805static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5806 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5807{
5808 int i;
5809 u16 data_len;
b0692dd4 5810 u32 ioasc, fd_ioasc;
96d21f00 5811 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
5812 __be32 *ioasa_data = (__be32 *)ioasa;
5813 int error_index;
5814
96d21f00
WB
5815 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5816 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5817
5818 if (0 == ioasc)
5819 return;
5820
5821 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5822 return;
5823
b0692dd4
BK
5824 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5825 error_index = ipr_get_error(fd_ioasc);
5826 else
5827 error_index = ipr_get_error(ioasc);
1da177e4
LT
5828
5829 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5830 /* Don't log an error if the IOA already logged one */
96d21f00 5831 if (ioasa->hdr.ilid != 0)
1da177e4
LT
5832 return;
5833
cc9bd5d4
BK
5834 if (!ipr_is_gscsi(res))
5835 return;
5836
1da177e4
LT
5837 if (ipr_error_table[error_index].log_ioasa == 0)
5838 return;
5839 }
5840
fe964d0a 5841 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5842
96d21f00
WB
5843 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5844 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5845 data_len = sizeof(struct ipr_ioasa64);
5846 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5847 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
5848
5849 ipr_err("IOASA Dump:\n");
5850
5851 for (i = 0; i < data_len / 4; i += 4) {
5852 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5853 be32_to_cpu(ioasa_data[i]),
5854 be32_to_cpu(ioasa_data[i+1]),
5855 be32_to_cpu(ioasa_data[i+2]),
5856 be32_to_cpu(ioasa_data[i+3]));
5857 }
5858}
5859
5860/**
5861 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5862 * @ioasa: IOASA
5863 * @sense_buf: sense data buffer
5864 *
5865 * Return value:
5866 * none
5867 **/
5868static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5869{
5870 u32 failing_lba;
5871 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5872 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
5873 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5874 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
5875
5876 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5877
5878 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5879 return;
5880
5881 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5882
5883 if (ipr_is_vset_device(res) &&
5884 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5885 ioasa->u.vset.failing_lba_hi != 0) {
5886 sense_buf[0] = 0x72;
5887 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5888 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5889 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5890
5891 sense_buf[7] = 12;
5892 sense_buf[8] = 0;
5893 sense_buf[9] = 0x0A;
5894 sense_buf[10] = 0x80;
5895
5896 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5897
5898 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5899 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5900 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5901 sense_buf[15] = failing_lba & 0x000000ff;
5902
5903 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5904
5905 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5906 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5907 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5908 sense_buf[19] = failing_lba & 0x000000ff;
5909 } else {
5910 sense_buf[0] = 0x70;
5911 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5912 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5913 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5914
5915 /* Illegal request */
5916 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 5917 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
5918 sense_buf[7] = 10; /* additional length */
5919
5920 /* IOARCB was in error */
5921 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5922 sense_buf[15] = 0xC0;
5923 else /* Parameter data was invalid */
5924 sense_buf[15] = 0x80;
5925
5926 sense_buf[16] =
5927 ((IPR_FIELD_POINTER_MASK &
96d21f00 5928 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
5929 sense_buf[17] =
5930 (IPR_FIELD_POINTER_MASK &
96d21f00 5931 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
5932 } else {
5933 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5934 if (ipr_is_vset_device(res))
5935 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5936 else
5937 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5938
5939 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5940 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5941 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5942 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5943 sense_buf[6] = failing_lba & 0x000000ff;
5944 }
5945
5946 sense_buf[7] = 6; /* additional length */
5947 }
5948 }
5949}
5950
ee0a90fa
BK
5951/**
5952 * ipr_get_autosense - Copy autosense data to sense buffer
5953 * @ipr_cmd: ipr command struct
5954 *
5955 * This function copies the autosense buffer to the buffer
5956 * in the scsi_cmd, if there is autosense available.
5957 *
5958 * Return value:
5959 * 1 if autosense was available / 0 if not
5960 **/
5961static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5962{
96d21f00
WB
5963 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5964 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 5965
96d21f00 5966 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
5967 return 0;
5968
96d21f00
WB
5969 if (ipr_cmd->ioa_cfg->sis64)
5970 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5971 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5972 SCSI_SENSE_BUFFERSIZE));
5973 else
5974 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5975 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5976 SCSI_SENSE_BUFFERSIZE));
ee0a90fa
BK
5977 return 1;
5978}
5979
1da177e4
LT
5980/**
5981 * ipr_erp_start - Process an error response for a SCSI op
5982 * @ioa_cfg: ioa config struct
5983 * @ipr_cmd: ipr command struct
5984 *
5985 * This function determines whether or not to initiate ERP
5986 * on the affected device.
5987 *
5988 * Return value:
5989 * nothing
5990 **/
5991static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5992 struct ipr_cmnd *ipr_cmd)
5993{
5994 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5995 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5996 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 5997 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5998
5999 if (!res) {
6000 ipr_scsi_eh_done(ipr_cmd);
6001 return;
6002 }
6003
8a048994 6004 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
6005 ipr_gen_sense(ipr_cmd);
6006
cc9bd5d4
BK
6007 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6008
8a048994 6009 switch (masked_ioasc) {
1da177e4 6010 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
6011 if (ipr_is_naca_model(res))
6012 scsi_cmd->result |= (DID_ABORT << 16);
6013 else
6014 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
6015 break;
6016 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 6017 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
6018 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6019 break;
6020 case IPR_IOASC_HW_SEL_TIMEOUT:
6021 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
6022 if (!ipr_is_naca_model(res))
6023 res->needs_sync_complete = 1;
1da177e4
LT
6024 break;
6025 case IPR_IOASC_SYNC_REQUIRED:
6026 if (!res->in_erp)
6027 res->needs_sync_complete = 1;
6028 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6029 break;
6030 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 6031 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
6032 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6033 break;
6034 case IPR_IOASC_BUS_WAS_RESET:
6035 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6036 /*
6037 * Report the bus reset and ask for a retry. The device
6038 * will give CC/UA the next command.
6039 */
6040 if (!res->resetting_device)
6041 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6042 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
6043 if (!ipr_is_naca_model(res))
6044 res->needs_sync_complete = 1;
1da177e4
LT
6045 break;
6046 case IPR_IOASC_HW_DEV_BUS_STATUS:
6047 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6048 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
6049 if (!ipr_get_autosense(ipr_cmd)) {
6050 if (!ipr_is_naca_model(res)) {
6051 ipr_erp_cancel_all(ipr_cmd);
6052 return;
6053 }
6054 }
1da177e4 6055 }
ee0a90fa
BK
6056 if (!ipr_is_naca_model(res))
6057 res->needs_sync_complete = 1;
1da177e4
LT
6058 break;
6059 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6060 break;
6061 default:
5b7304fb
BK
6062 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6063 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6064 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
6065 res->needs_sync_complete = 1;
6066 break;
6067 }
6068
63015bc9 6069 scsi_dma_unmap(ipr_cmd->scsi_cmd);
05a6538a 6070 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6071 scsi_cmd->scsi_done(scsi_cmd);
6072}
6073
6074/**
6075 * ipr_scsi_done - mid-layer done function
6076 * @ipr_cmd: ipr command struct
6077 *
6078 * This function is invoked by the interrupt handler for
6079 * ops generated by the SCSI mid-layer
6080 *
6081 * Return value:
6082 * none
6083 **/
6084static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6085{
6086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6087 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 6088 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 6089 unsigned long hrrq_flags;
1da177e4 6090
96d21f00 6091 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
6092
6093 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
172cd6e1
BK
6094 scsi_dma_unmap(scsi_cmd);
6095
56d6aa33 6096 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
05a6538a 6097 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6098 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6099 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 6100 } else {
56d6aa33 6101 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
1da177e4 6102 ipr_erp_start(ioa_cfg, ipr_cmd);
56d6aa33 6103 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
172cd6e1 6104 }
1da177e4
LT
6105}
6106
1da177e4
LT
6107/**
6108 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6109 * @shost: scsi host struct
1da177e4 6110 * @scsi_cmd: scsi command struct
1da177e4
LT
6111 *
6112 * This function queues a request generated by the mid-layer.
6113 *
6114 * Return value:
6115 * 0 on success
6116 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6117 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6118 **/
00bfef2c
BK
6119static int ipr_queuecommand(struct Scsi_Host *shost,
6120 struct scsi_cmnd *scsi_cmd)
1da177e4
LT
6121{
6122 struct ipr_ioa_cfg *ioa_cfg;
6123 struct ipr_resource_entry *res;
6124 struct ipr_ioarcb *ioarcb;
6125 struct ipr_cmnd *ipr_cmd;
56d6aa33 6126 unsigned long hrrq_flags, lock_flags;
d12f1576 6127 int rc;
05a6538a 6128 struct ipr_hrr_queue *hrrq;
6129 int hrrq_id;
1da177e4 6130
00bfef2c
BK
6131 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6132
1da177e4 6133 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6134 res = scsi_cmd->device->hostdata;
56d6aa33 6135
6136 if (ipr_is_gata(res) && res->sata_port) {
6137 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6138 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6139 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6140 return rc;
6141 }
6142
05a6538a 6143 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6144 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6145
56d6aa33 6146 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4
LT
6147 /*
6148 * We are currently blocking all devices due to a host reset
6149 * We have told the host to stop giving us new requests, but
6150 * ERP ops don't count. FIXME
6151 */
bfae7820 6152 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6153 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6154 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6155 }
1da177e4
LT
6156
6157 /*
6158 * FIXME - Create scsi_set_host_offline interface
6159 * and the ioa_is_dead check can be removed
6160 */
bfae7820 6161 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6162 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6163 goto err_nodev;
1da177e4
LT
6164 }
6165
05a6538a 6166 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6167 if (ipr_cmd == NULL) {
56d6aa33 6168 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6169 return SCSI_MLQUEUE_HOST_BUSY;
6170 }
56d6aa33 6171 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6172
172cd6e1 6173 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6174 ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6175
6176 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6177 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6178 ipr_cmd->done = ipr_scsi_eh_done;
1da177e4
LT
6179
6180 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6181 if (scsi_cmd->underflow == 0)
6182 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6183
1da177e4 6184 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
0b1f8d44
WX
6185 if (ipr_is_gscsi(res) && res->reset_occurred) {
6186 res->reset_occurred = 0;
ab6c10b1 6187 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6188 }
1da177e4 6189 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
50668633
CH
6190 if (scsi_cmd->flags & SCMD_TAGGED)
6191 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6192 else
6193 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
1da177e4
LT
6194 }
6195
6196 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6197 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6198 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6199 }
1da177e4 6200
d12f1576
DC
6201 if (ioa_cfg->sis64)
6202 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6203 else
6204 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6205
56d6aa33 6206 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6207 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6208 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6209 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6210 if (!rc)
6211 scsi_dma_unmap(scsi_cmd);
a5fb407e 6212 return SCSI_MLQUEUE_HOST_BUSY;
1da177e4
LT
6213 }
6214
56d6aa33 6215 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6216 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6217 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6218 scsi_dma_unmap(scsi_cmd);
6219 goto err_nodev;
6220 }
6221
6222 ioarcb->res_handle = res->res_handle;
6223 if (res->needs_sync_complete) {
6224 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6225 res->needs_sync_complete = 0;
6226 }
05a6538a 6227 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6228 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6229 ipr_send_command(ipr_cmd);
56d6aa33 6230 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6231 return 0;
1da177e4 6232
00bfef2c 6233err_nodev:
56d6aa33 6234 spin_lock_irqsave(hrrq->lock, hrrq_flags);
00bfef2c
BK
6235 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6236 scsi_cmd->result = (DID_NO_CONNECT << 16);
6237 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6238 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6239 return 0;
6240}
f281233d 6241
35a39691
BK
6242/**
6243 * ipr_ioctl - IOCTL handler
6244 * @sdev: scsi device struct
6245 * @cmd: IOCTL cmd
6246 * @arg: IOCTL arg
6247 *
6248 * Return value:
6249 * 0 on success / other on failure
6250 **/
bd705f2d 6251static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
6252{
6253 struct ipr_resource_entry *res;
6254
6255 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
6256 if (res && ipr_is_gata(res)) {
6257 if (cmd == HDIO_GET_IDENTITY)
6258 return -ENOTTY;
94be9a58 6259 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6260 }
35a39691
BK
6261
6262 return -EINVAL;
6263}
6264
1da177e4
LT
6265/**
6266 * ipr_info - Get information about the card/driver
6267 * @scsi_host: scsi host struct
6268 *
6269 * Return value:
6270 * pointer to buffer with description string
6271 **/
203fa3fe 6272static const char *ipr_ioa_info(struct Scsi_Host *host)
1da177e4
LT
6273{
6274 static char buffer[512];
6275 struct ipr_ioa_cfg *ioa_cfg;
6276 unsigned long lock_flags = 0;
6277
6278 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6279
6280 spin_lock_irqsave(host->host_lock, lock_flags);
6281 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6282 spin_unlock_irqrestore(host->host_lock, lock_flags);
6283
6284 return buffer;
6285}
6286
6287static struct scsi_host_template driver_template = {
6288 .module = THIS_MODULE,
6289 .name = "IPR",
6290 .info = ipr_ioa_info,
35a39691 6291 .ioctl = ipr_ioctl,
1da177e4
LT
6292 .queuecommand = ipr_queuecommand,
6293 .eh_abort_handler = ipr_eh_abort,
6294 .eh_device_reset_handler = ipr_eh_dev_reset,
6295 .eh_host_reset_handler = ipr_eh_host_reset,
6296 .slave_alloc = ipr_slave_alloc,
6297 .slave_configure = ipr_slave_configure,
6298 .slave_destroy = ipr_slave_destroy,
f688f96d 6299 .scan_finished = ipr_scan_finished,
35a39691
BK
6300 .target_alloc = ipr_target_alloc,
6301 .target_destroy = ipr_target_destroy,
1da177e4 6302 .change_queue_depth = ipr_change_queue_depth,
1da177e4
LT
6303 .bios_param = ipr_biosparam,
6304 .can_queue = IPR_MAX_COMMANDS,
6305 .this_id = -1,
6306 .sg_tablesize = IPR_MAX_SGLIST,
6307 .max_sectors = IPR_IOA_MAX_SECTORS,
6308 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6309 .use_clustering = ENABLE_CLUSTERING,
6310 .shost_attrs = ipr_ioa_attrs,
6311 .sdev_attrs = ipr_dev_attrs,
54b2b50c
MP
6312 .proc_name = IPR_NAME,
6313 .no_write_same = 1,
2ecb204d 6314 .use_blk_tags = 1,
1da177e4
LT
6315};
6316
35a39691
BK
6317/**
6318 * ipr_ata_phy_reset - libata phy_reset handler
6319 * @ap: ata port to reset
6320 *
6321 **/
6322static void ipr_ata_phy_reset(struct ata_port *ap)
6323{
6324 unsigned long flags;
6325 struct ipr_sata_port *sata_port = ap->private_data;
6326 struct ipr_resource_entry *res = sata_port->res;
6327 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6328 int rc;
6329
6330 ENTER;
6331 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6332 while (ioa_cfg->in_reset_reload) {
35a39691
BK
6333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6334 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6335 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6336 }
6337
56d6aa33 6338 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
35a39691
BK
6339 goto out_unlock;
6340
6341 rc = ipr_device_reset(ioa_cfg, res);
6342
6343 if (rc) {
3e4ec344 6344 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6345 goto out_unlock;
6346 }
6347
3e7ebdfa
WB
6348 ap->link.device[0].class = res->ata_class;
6349 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6350 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6351
6352out_unlock:
6353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6354 LEAVE;
6355}
6356
6357/**
6358 * ipr_ata_post_internal - Cleanup after an internal command
6359 * @qc: ATA queued command
6360 *
6361 * Return value:
6362 * none
6363 **/
6364static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6365{
6366 struct ipr_sata_port *sata_port = qc->ap->private_data;
6367 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6368 struct ipr_cmnd *ipr_cmd;
05a6538a 6369 struct ipr_hrr_queue *hrrq;
35a39691
BK
6370 unsigned long flags;
6371
6372 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6373 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
6374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6375 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6376 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6377 }
6378
05a6538a 6379 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6380 spin_lock(&hrrq->_lock);
05a6538a 6381 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6382 if (ipr_cmd->qc == qc) {
6383 ipr_device_reset(ioa_cfg, sata_port->res);
6384 break;
6385 }
35a39691 6386 }
56d6aa33 6387 spin_unlock(&hrrq->_lock);
35a39691
BK
6388 }
6389 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6390}
6391
35a39691
BK
6392/**
6393 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6394 * @regs: destination
6395 * @tf: source ATA taskfile
6396 *
6397 * Return value:
6398 * none
6399 **/
6400static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6401 struct ata_taskfile *tf)
6402{
6403 regs->feature = tf->feature;
6404 regs->nsect = tf->nsect;
6405 regs->lbal = tf->lbal;
6406 regs->lbam = tf->lbam;
6407 regs->lbah = tf->lbah;
6408 regs->device = tf->device;
6409 regs->command = tf->command;
6410 regs->hob_feature = tf->hob_feature;
6411 regs->hob_nsect = tf->hob_nsect;
6412 regs->hob_lbal = tf->hob_lbal;
6413 regs->hob_lbam = tf->hob_lbam;
6414 regs->hob_lbah = tf->hob_lbah;
6415 regs->ctl = tf->ctl;
6416}
6417
6418/**
6419 * ipr_sata_done - done function for SATA commands
6420 * @ipr_cmd: ipr command struct
6421 *
6422 * This function is invoked by the interrupt handler for
6423 * ops generated by the SCSI mid-layer to SATA devices
6424 *
6425 * Return value:
6426 * none
6427 **/
6428static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6429{
6430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6431 struct ata_queued_cmd *qc = ipr_cmd->qc;
6432 struct ipr_sata_port *sata_port = qc->ap->private_data;
6433 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6434 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6435
56d6aa33 6436 spin_lock(&ipr_cmd->hrrq->_lock);
96d21f00
WB
6437 if (ipr_cmd->ioa_cfg->sis64)
6438 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6439 sizeof(struct ipr_ioasa_gata));
6440 else
6441 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6442 sizeof(struct ipr_ioasa_gata));
35a39691
BK
6443 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6444
96d21f00 6445 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6446 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
6447
6448 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6449 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6450 else
96d21f00 6451 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6452 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6453 spin_unlock(&ipr_cmd->hrrq->_lock);
35a39691
BK
6454 ata_qc_complete(qc);
6455}
6456
a32c055f
WB
6457/**
6458 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6459 * @ipr_cmd: ipr command struct
6460 * @qc: ATA queued command
6461 *
6462 **/
6463static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6464 struct ata_queued_cmd *qc)
6465{
6466 u32 ioadl_flags = 0;
6467 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6468 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
a32c055f
WB
6469 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6470 int len = qc->nbytes;
6471 struct scatterlist *sg;
6472 unsigned int si;
6473 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6474
6475 if (len == 0)
6476 return;
6477
6478 if (qc->dma_dir == DMA_TO_DEVICE) {
6479 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6480 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6481 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6482 ioadl_flags = IPR_IOADL_FLAGS_READ;
6483
6484 ioarcb->data_transfer_length = cpu_to_be32(len);
6485 ioarcb->ioadl_len =
6486 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6487 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6488 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
a32c055f
WB
6489
6490 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6491 ioadl64->flags = cpu_to_be32(ioadl_flags);
6492 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6493 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6494
6495 last_ioadl64 = ioadl64;
6496 ioadl64++;
6497 }
6498
6499 if (likely(last_ioadl64))
6500 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6501}
6502
35a39691
BK
6503/**
6504 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6505 * @ipr_cmd: ipr command struct
6506 * @qc: ATA queued command
6507 *
6508 **/
6509static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6510 struct ata_queued_cmd *qc)
6511{
6512 u32 ioadl_flags = 0;
6513 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6514 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6515 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6516 int len = qc->nbytes;
35a39691 6517 struct scatterlist *sg;
ff2aeb1e 6518 unsigned int si;
35a39691
BK
6519
6520 if (len == 0)
6521 return;
6522
6523 if (qc->dma_dir == DMA_TO_DEVICE) {
6524 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6525 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6526 ioarcb->data_transfer_length = cpu_to_be32(len);
6527 ioarcb->ioadl_len =
35a39691
BK
6528 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6529 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6530 ioadl_flags = IPR_IOADL_FLAGS_READ;
6531 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6532 ioarcb->read_ioadl_len =
6533 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6534 }
6535
ff2aeb1e 6536 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
6537 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6538 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
6539
6540 last_ioadl = ioadl;
6541 ioadl++;
35a39691 6542 }
3be6cbd7
JG
6543
6544 if (likely(last_ioadl))
6545 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
6546}
6547
56d6aa33 6548/**
6549 * ipr_qc_defer - Get a free ipr_cmd
6550 * @qc: queued command
6551 *
6552 * Return value:
6553 * 0 if success
6554 **/
6555static int ipr_qc_defer(struct ata_queued_cmd *qc)
6556{
6557 struct ata_port *ap = qc->ap;
6558 struct ipr_sata_port *sata_port = ap->private_data;
6559 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6560 struct ipr_cmnd *ipr_cmd;
6561 struct ipr_hrr_queue *hrrq;
6562 int hrrq_id;
6563
6564 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6565 hrrq = &ioa_cfg->hrrq[hrrq_id];
6566
6567 qc->lldd_task = NULL;
6568 spin_lock(&hrrq->_lock);
6569 if (unlikely(hrrq->ioa_is_dead)) {
6570 spin_unlock(&hrrq->_lock);
6571 return 0;
6572 }
6573
6574 if (unlikely(!hrrq->allow_cmds)) {
6575 spin_unlock(&hrrq->_lock);
6576 return ATA_DEFER_LINK;
6577 }
6578
6579 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6580 if (ipr_cmd == NULL) {
6581 spin_unlock(&hrrq->_lock);
6582 return ATA_DEFER_LINK;
6583 }
6584
6585 qc->lldd_task = ipr_cmd;
6586 spin_unlock(&hrrq->_lock);
6587 return 0;
6588}
6589
35a39691
BK
6590/**
6591 * ipr_qc_issue - Issue a SATA qc to a device
6592 * @qc: queued command
6593 *
6594 * Return value:
6595 * 0 if success
6596 **/
6597static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6598{
6599 struct ata_port *ap = qc->ap;
6600 struct ipr_sata_port *sata_port = ap->private_data;
6601 struct ipr_resource_entry *res = sata_port->res;
6602 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6603 struct ipr_cmnd *ipr_cmd;
6604 struct ipr_ioarcb *ioarcb;
6605 struct ipr_ioarcb_ata_regs *regs;
6606
56d6aa33 6607 if (qc->lldd_task == NULL)
6608 ipr_qc_defer(qc);
6609
6610 ipr_cmd = qc->lldd_task;
6611 if (ipr_cmd == NULL)
0feeed82 6612 return AC_ERR_SYSTEM;
35a39691 6613
56d6aa33 6614 qc->lldd_task = NULL;
6615 spin_lock(&ipr_cmd->hrrq->_lock);
6616 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6617 ipr_cmd->hrrq->ioa_is_dead)) {
6618 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6619 spin_unlock(&ipr_cmd->hrrq->_lock);
6620 return AC_ERR_SYSTEM;
6621 }
6622
05a6538a 6623 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 6624 ioarcb = &ipr_cmd->ioarcb;
35a39691 6625
a32c055f
WB
6626 if (ioa_cfg->sis64) {
6627 regs = &ipr_cmd->i.ata_ioadl.regs;
6628 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6629 } else
6630 regs = &ioarcb->u.add_data.u.regs;
6631
6632 memset(regs, 0, sizeof(*regs));
6633 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 6634
56d6aa33 6635 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
6636 ipr_cmd->qc = qc;
6637 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6638 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6639 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6640 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6641 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6642 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6643
a32c055f
WB
6644 if (ioa_cfg->sis64)
6645 ipr_build_ata_ioadl64(ipr_cmd, qc);
6646 else
6647 ipr_build_ata_ioadl(ipr_cmd, qc);
6648
35a39691
BK
6649 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6650 ipr_copy_sata_tf(regs, &qc->tf);
6651 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6652 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6653
6654 switch (qc->tf.protocol) {
6655 case ATA_PROT_NODATA:
6656 case ATA_PROT_PIO:
6657 break;
6658
6659 case ATA_PROT_DMA:
6660 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6661 break;
6662
0dc36888
TH
6663 case ATAPI_PROT_PIO:
6664 case ATAPI_PROT_NODATA:
35a39691
BK
6665 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6666 break;
6667
0dc36888 6668 case ATAPI_PROT_DMA:
35a39691
BK
6669 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6670 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6671 break;
6672
6673 default:
6674 WARN_ON(1);
56d6aa33 6675 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 6676 return AC_ERR_INVALID;
35a39691
BK
6677 }
6678
a32c055f 6679 ipr_send_command(ipr_cmd);
56d6aa33 6680 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 6681
35a39691
BK
6682 return 0;
6683}
6684
4c9bf4e7
TH
6685/**
6686 * ipr_qc_fill_rtf - Read result TF
6687 * @qc: ATA queued command
6688 *
6689 * Return value:
6690 * true
6691 **/
6692static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6693{
6694 struct ipr_sata_port *sata_port = qc->ap->private_data;
6695 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6696 struct ata_taskfile *tf = &qc->result_tf;
6697
6698 tf->feature = g->error;
6699 tf->nsect = g->nsect;
6700 tf->lbal = g->lbal;
6701 tf->lbam = g->lbam;
6702 tf->lbah = g->lbah;
6703 tf->device = g->device;
6704 tf->command = g->status;
6705 tf->hob_nsect = g->hob_nsect;
6706 tf->hob_lbal = g->hob_lbal;
6707 tf->hob_lbam = g->hob_lbam;
6708 tf->hob_lbah = g->hob_lbah;
4c9bf4e7
TH
6709
6710 return true;
6711}
6712
35a39691 6713static struct ata_port_operations ipr_sata_ops = {
35a39691 6714 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6715 .hardreset = ipr_sata_reset,
35a39691 6716 .post_internal_cmd = ipr_ata_post_internal,
35a39691 6717 .qc_prep = ata_noop_qc_prep,
56d6aa33 6718 .qc_defer = ipr_qc_defer,
35a39691 6719 .qc_issue = ipr_qc_issue,
4c9bf4e7 6720 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6721 .port_start = ata_sas_port_start,
6722 .port_stop = ata_sas_port_stop
6723};
6724
6725static struct ata_port_info sata_port_info = {
9cbe056f 6726 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
0f2e0330
SS
6727 .pio_mask = ATA_PIO4_ONLY,
6728 .mwdma_mask = ATA_MWDMA2,
6729 .udma_mask = ATA_UDMA6,
35a39691
BK
6730 .port_ops = &ipr_sata_ops
6731};
6732
1da177e4
LT
6733#ifdef CONFIG_PPC_PSERIES
6734static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
6735 PVR_NORTHSTAR,
6736 PVR_PULSAR,
6737 PVR_POWER4,
6738 PVR_ICESTAR,
6739 PVR_SSTAR,
6740 PVR_POWER4p,
6741 PVR_630,
6742 PVR_630p
1da177e4
LT
6743};
6744
6745/**
6746 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6747 * @ioa_cfg: ioa cfg struct
6748 *
6749 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6750 * certain pSeries hardware. This function determines if the given
6751 * adapter is in one of these confgurations or not.
6752 *
6753 * Return value:
6754 * 1 if adapter is not supported / 0 if adapter is supported
6755 **/
6756static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6757{
1da177e4
LT
6758 int i;
6759
44c10138 6760 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 6761 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 6762 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 6763 return 1;
1da177e4
LT
6764 }
6765 }
6766 return 0;
6767}
6768#else
6769#define ipr_invalid_adapter(ioa_cfg) 0
6770#endif
6771
6772/**
6773 * ipr_ioa_bringdown_done - IOA bring down completion.
6774 * @ipr_cmd: ipr command struct
6775 *
6776 * This function processes the completion of an adapter bring down.
6777 * It wakes any reset sleepers.
6778 *
6779 * Return value:
6780 * IPR_RC_JOB_RETURN
6781 **/
6782static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6783{
6784 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 6785 int i;
1da177e4
LT
6786
6787 ENTER;
bfae7820
BK
6788 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6789 ipr_trace;
6790 spin_unlock_irq(ioa_cfg->host->host_lock);
6791 scsi_unblock_requests(ioa_cfg->host);
6792 spin_lock_irq(ioa_cfg->host->host_lock);
6793 }
6794
1da177e4
LT
6795 ioa_cfg->in_reset_reload = 0;
6796 ioa_cfg->reset_retries = 0;
96b04db9 6797 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6798 spin_lock(&ioa_cfg->hrrq[i]._lock);
6799 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6800 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6801 }
6802 wmb();
6803
05a6538a 6804 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 6805 wake_up_all(&ioa_cfg->reset_wait_q);
1da177e4
LT
6806 LEAVE;
6807
6808 return IPR_RC_JOB_RETURN;
6809}
6810
6811/**
6812 * ipr_ioa_reset_done - IOA reset completion.
6813 * @ipr_cmd: ipr command struct
6814 *
6815 * This function processes the completion of an adapter reset.
6816 * It schedules any necessary mid-layer add/removes and
6817 * wakes any reset sleepers.
6818 *
6819 * Return value:
6820 * IPR_RC_JOB_RETURN
6821 **/
6822static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6823{
6824 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6825 struct ipr_resource_entry *res;
6826 struct ipr_hostrcb *hostrcb, *temp;
56d6aa33 6827 int i = 0, j;
1da177e4
LT
6828
6829 ENTER;
6830 ioa_cfg->in_reset_reload = 0;
56d6aa33 6831 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6832 spin_lock(&ioa_cfg->hrrq[j]._lock);
6833 ioa_cfg->hrrq[j].allow_cmds = 1;
6834 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6835 }
6836 wmb();
1da177e4 6837 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6838 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6839
6840 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 6841 if (res->add_to_ml || res->del_from_ml) {
1da177e4
LT
6842 ipr_trace;
6843 break;
6844 }
6845 }
6846 schedule_work(&ioa_cfg->work_q);
6847
6848 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6849 list_del(&hostrcb->queue);
6850 if (i++ < IPR_NUM_LOG_HCAMS)
6851 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6852 else
6853 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6854 }
6855
6bb04170 6856 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6857 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6858
6859 ioa_cfg->reset_retries = 0;
05a6538a 6860 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6861 wake_up_all(&ioa_cfg->reset_wait_q);
6862
30237853 6863 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6864 scsi_unblock_requests(ioa_cfg->host);
30237853 6865 spin_lock(ioa_cfg->host->host_lock);
1da177e4 6866
56d6aa33 6867 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
1da177e4
LT
6868 scsi_block_requests(ioa_cfg->host);
6869
f688f96d 6870 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
6871 LEAVE;
6872 return IPR_RC_JOB_RETURN;
6873}
6874
6875/**
6876 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6877 * @supported_dev: supported device struct
6878 * @vpids: vendor product id struct
6879 *
6880 * Return value:
6881 * none
6882 **/
6883static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6884 struct ipr_std_inq_vpids *vpids)
6885{
6886 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6887 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6888 supported_dev->num_records = 1;
6889 supported_dev->data_length =
6890 cpu_to_be16(sizeof(struct ipr_supported_device));
6891 supported_dev->reserved = 0;
6892}
6893
6894/**
6895 * ipr_set_supported_devs - Send Set Supported Devices for a device
6896 * @ipr_cmd: ipr command struct
6897 *
a32c055f 6898 * This function sends a Set Supported Devices to the adapter
1da177e4
LT
6899 *
6900 * Return value:
6901 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6902 **/
6903static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6904{
6905 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6906 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6907 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6908 struct ipr_resource_entry *res = ipr_cmd->u.res;
6909
6910 ipr_cmd->job_step = ipr_ioa_reset_done;
6911
6912 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6913 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6914 continue;
6915
6916 ipr_cmd->u.res = res;
3e7ebdfa 6917 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6918
6919 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6920 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6921 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6922
6923 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6924 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6925 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6926 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6927
a32c055f
WB
6928 ipr_init_ioadl(ipr_cmd,
6929 ioa_cfg->vpd_cbs_dma +
6930 offsetof(struct ipr_misc_cbs, supp_dev),
6931 sizeof(struct ipr_supported_device),
6932 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6933
6934 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6935 IPR_SET_SUP_DEVICE_TIMEOUT);
6936
3e7ebdfa
WB
6937 if (!ioa_cfg->sis64)
6938 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 6939 LEAVE;
1da177e4
LT
6940 return IPR_RC_JOB_RETURN;
6941 }
6942
05a6538a 6943 LEAVE;
1da177e4
LT
6944 return IPR_RC_JOB_CONTINUE;
6945}
6946
6947/**
6948 * ipr_get_mode_page - Locate specified mode page
6949 * @mode_pages: mode page buffer
6950 * @page_code: page code to find
6951 * @len: minimum required length for mode page
6952 *
6953 * Return value:
6954 * pointer to mode page / NULL on failure
6955 **/
6956static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6957 u32 page_code, u32 len)
6958{
6959 struct ipr_mode_page_hdr *mode_hdr;
6960 u32 page_length;
6961 u32 length;
6962
6963 if (!mode_pages || (mode_pages->hdr.length == 0))
6964 return NULL;
6965
6966 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6967 mode_hdr = (struct ipr_mode_page_hdr *)
6968 (mode_pages->data + mode_pages->hdr.block_desc_len);
6969
6970 while (length) {
6971 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6972 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6973 return mode_hdr;
6974 break;
6975 } else {
6976 page_length = (sizeof(struct ipr_mode_page_hdr) +
6977 mode_hdr->page_length);
6978 length -= page_length;
6979 mode_hdr = (struct ipr_mode_page_hdr *)
6980 ((unsigned long)mode_hdr + page_length);
6981 }
6982 }
6983 return NULL;
6984}
6985
6986/**
6987 * ipr_check_term_power - Check for term power errors
6988 * @ioa_cfg: ioa config struct
6989 * @mode_pages: IOAFP mode pages buffer
6990 *
6991 * Check the IOAFP's mode page 28 for term power errors
6992 *
6993 * Return value:
6994 * nothing
6995 **/
6996static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6997 struct ipr_mode_pages *mode_pages)
6998{
6999 int i;
7000 int entry_length;
7001 struct ipr_dev_bus_entry *bus;
7002 struct ipr_mode_page28 *mode_page;
7003
7004 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7005 sizeof(struct ipr_mode_page28));
7006
7007 entry_length = mode_page->entry_length;
7008
7009 bus = mode_page->bus;
7010
7011 for (i = 0; i < mode_page->num_entries; i++) {
7012 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7013 dev_err(&ioa_cfg->pdev->dev,
7014 "Term power is absent on scsi bus %d\n",
7015 bus->res_addr.bus);
7016 }
7017
7018 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7019 }
7020}
7021
7022/**
7023 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7024 * @ioa_cfg: ioa config struct
7025 *
7026 * Looks through the config table checking for SES devices. If
7027 * the SES device is in the SES table indicating a maximum SCSI
7028 * bus speed, the speed is limited for the bus.
7029 *
7030 * Return value:
7031 * none
7032 **/
7033static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7034{
7035 u32 max_xfer_rate;
7036 int i;
7037
7038 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7039 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7040 ioa_cfg->bus_attr[i].bus_width);
7041
7042 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7043 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7044 }
7045}
7046
7047/**
7048 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7049 * @ioa_cfg: ioa config struct
7050 * @mode_pages: mode page 28 buffer
7051 *
7052 * Updates mode page 28 based on driver configuration
7053 *
7054 * Return value:
7055 * none
7056 **/
7057static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7058 struct ipr_mode_pages *mode_pages)
1da177e4
LT
7059{
7060 int i, entry_length;
7061 struct ipr_dev_bus_entry *bus;
7062 struct ipr_bus_attributes *bus_attr;
7063 struct ipr_mode_page28 *mode_page;
7064
7065 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7066 sizeof(struct ipr_mode_page28));
7067
7068 entry_length = mode_page->entry_length;
7069
7070 /* Loop for each device bus entry */
7071 for (i = 0, bus = mode_page->bus;
7072 i < mode_page->num_entries;
7073 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7074 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7075 dev_err(&ioa_cfg->pdev->dev,
7076 "Invalid resource address reported: 0x%08X\n",
7077 IPR_GET_PHYS_LOC(bus->res_addr));
7078 continue;
7079 }
7080
7081 bus_attr = &ioa_cfg->bus_attr[i];
7082 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7083 bus->bus_width = bus_attr->bus_width;
7084 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7085 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7086 if (bus_attr->qas_enabled)
7087 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7088 else
7089 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7090 }
7091}
7092
7093/**
7094 * ipr_build_mode_select - Build a mode select command
7095 * @ipr_cmd: ipr command struct
7096 * @res_handle: resource handle to send command to
7097 * @parm: Byte 2 of Mode Sense command
7098 * @dma_addr: DMA buffer address
7099 * @xfer_len: data transfer length
7100 *
7101 * Return value:
7102 * none
7103 **/
7104static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
7105 __be32 res_handle, u8 parm,
7106 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7107{
1da177e4
LT
7108 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7109
7110 ioarcb->res_handle = res_handle;
7111 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7112 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7113 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7114 ioarcb->cmd_pkt.cdb[1] = parm;
7115 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7116
a32c055f 7117 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7118}
7119
7120/**
7121 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7122 * @ipr_cmd: ipr command struct
7123 *
7124 * This function sets up the SCSI bus attributes and sends
7125 * a Mode Select for Page 28 to activate them.
7126 *
7127 * Return value:
7128 * IPR_RC_JOB_RETURN
7129 **/
7130static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7131{
7132 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7133 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7134 int length;
7135
7136 ENTER;
4733804c
BK
7137 ipr_scsi_bus_speed_limit(ioa_cfg);
7138 ipr_check_term_power(ioa_cfg, mode_pages);
7139 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7140 length = mode_pages->hdr.length + 1;
7141 mode_pages->hdr.length = 0;
1da177e4
LT
7142
7143 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7144 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7145 length);
7146
f72919ec
WB
7147 ipr_cmd->job_step = ipr_set_supported_devs;
7148 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7149 struct ipr_resource_entry, queue);
1da177e4
LT
7150 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7151
7152 LEAVE;
7153 return IPR_RC_JOB_RETURN;
7154}
7155
7156/**
7157 * ipr_build_mode_sense - Builds a mode sense command
7158 * @ipr_cmd: ipr command struct
7159 * @res: resource entry struct
7160 * @parm: Byte 2 of mode sense command
7161 * @dma_addr: DMA address of mode sense buffer
7162 * @xfer_len: Size of DMA buffer
7163 *
7164 * Return value:
7165 * none
7166 **/
7167static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7168 __be32 res_handle,
a32c055f 7169 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7170{
1da177e4
LT
7171 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7172
7173 ioarcb->res_handle = res_handle;
7174 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7175 ioarcb->cmd_pkt.cdb[2] = parm;
7176 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7177 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7178
a32c055f 7179 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7180}
7181
dfed823e
BK
7182/**
7183 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7184 * @ipr_cmd: ipr command struct
7185 *
7186 * This function handles the failure of an IOA bringup command.
7187 *
7188 * Return value:
7189 * IPR_RC_JOB_RETURN
7190 **/
7191static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7192{
7193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7194 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7195
7196 dev_err(&ioa_cfg->pdev->dev,
7197 "0x%02X failed with IOASC: 0x%08X\n",
7198 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7199
7200 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7201 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e
BK
7202 return IPR_RC_JOB_RETURN;
7203}
7204
7205/**
7206 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7207 * @ipr_cmd: ipr command struct
7208 *
7209 * This function handles the failure of a Mode Sense to the IOAFP.
7210 * Some adapters do not handle all mode pages.
7211 *
7212 * Return value:
7213 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7214 **/
7215static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7216{
f72919ec 7217 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7218 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
7219
7220 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7221 ipr_cmd->job_step = ipr_set_supported_devs;
7222 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7223 struct ipr_resource_entry, queue);
dfed823e
BK
7224 return IPR_RC_JOB_CONTINUE;
7225 }
7226
7227 return ipr_reset_cmd_failed(ipr_cmd);
7228}
7229
1da177e4
LT
7230/**
7231 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7232 * @ipr_cmd: ipr command struct
7233 *
7234 * This function send a Page 28 mode sense to the IOA to
7235 * retrieve SCSI bus attributes.
7236 *
7237 * Return value:
7238 * IPR_RC_JOB_RETURN
7239 **/
7240static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7241{
7242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7243
7244 ENTER;
7245 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7246 0x28, ioa_cfg->vpd_cbs_dma +
7247 offsetof(struct ipr_misc_cbs, mode_pages),
7248 sizeof(struct ipr_mode_pages));
7249
7250 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7251 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7252
7253 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7254
7255 LEAVE;
7256 return IPR_RC_JOB_RETURN;
7257}
7258
ac09c349
BK
7259/**
7260 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7261 * @ipr_cmd: ipr command struct
7262 *
7263 * This function enables dual IOA RAID support if possible.
7264 *
7265 * Return value:
7266 * IPR_RC_JOB_RETURN
7267 **/
7268static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7269{
7270 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7271 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7272 struct ipr_mode_page24 *mode_page;
7273 int length;
7274
7275 ENTER;
7276 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7277 sizeof(struct ipr_mode_page24));
7278
7279 if (mode_page)
7280 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7281
7282 length = mode_pages->hdr.length + 1;
7283 mode_pages->hdr.length = 0;
7284
7285 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7286 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7287 length);
7288
7289 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7290 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7291
7292 LEAVE;
7293 return IPR_RC_JOB_RETURN;
7294}
7295
7296/**
7297 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7298 * @ipr_cmd: ipr command struct
7299 *
7300 * This function handles the failure of a Mode Sense to the IOAFP.
7301 * Some adapters do not handle all mode pages.
7302 *
7303 * Return value:
7304 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7305 **/
7306static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7307{
96d21f00 7308 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7309
7310 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7311 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7312 return IPR_RC_JOB_CONTINUE;
7313 }
7314
7315 return ipr_reset_cmd_failed(ipr_cmd);
7316}
7317
7318/**
7319 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7320 * @ipr_cmd: ipr command struct
7321 *
7322 * This function send a mode sense to the IOA to retrieve
7323 * the IOA Advanced Function Control mode page.
7324 *
7325 * Return value:
7326 * IPR_RC_JOB_RETURN
7327 **/
7328static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7329{
7330 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7331
7332 ENTER;
7333 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7334 0x24, ioa_cfg->vpd_cbs_dma +
7335 offsetof(struct ipr_misc_cbs, mode_pages),
7336 sizeof(struct ipr_mode_pages));
7337
7338 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7339 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7340
7341 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7342
7343 LEAVE;
7344 return IPR_RC_JOB_RETURN;
7345}
7346
1da177e4
LT
7347/**
7348 * ipr_init_res_table - Initialize the resource table
7349 * @ipr_cmd: ipr command struct
7350 *
7351 * This function looks through the existing resource table, comparing
7352 * it with the config table. This function will take care of old/new
7353 * devices and schedule adding/removing them from the mid-layer
7354 * as appropriate.
7355 *
7356 * Return value:
7357 * IPR_RC_JOB_CONTINUE
7358 **/
7359static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7360{
7361 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7362 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7363 struct ipr_config_table_entry_wrapper cfgtew;
7364 int entries, found, flag, i;
1da177e4
LT
7365 LIST_HEAD(old_res);
7366
7367 ENTER;
3e7ebdfa
WB
7368 if (ioa_cfg->sis64)
7369 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7370 else
7371 flag = ioa_cfg->u.cfg_table->hdr.flags;
7372
7373 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
7374 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7375
7376 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7377 list_move_tail(&res->queue, &old_res);
7378
3e7ebdfa 7379 if (ioa_cfg->sis64)
438b0331 7380 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7381 else
7382 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7383
7384 for (i = 0; i < entries; i++) {
7385 if (ioa_cfg->sis64)
7386 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7387 else
7388 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7389 found = 0;
7390
7391 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7392 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7393 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7394 found = 1;
7395 break;
7396 }
7397 }
7398
7399 if (!found) {
7400 if (list_empty(&ioa_cfg->free_res_q)) {
7401 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7402 break;
7403 }
7404
7405 found = 1;
7406 res = list_entry(ioa_cfg->free_res_q.next,
7407 struct ipr_resource_entry, queue);
7408 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7409 ipr_init_res_entry(res, &cfgtew);
1da177e4 7410 res->add_to_ml = 1;
56115598
WB
7411 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7412 res->sdev->allow_restart = 1;
1da177e4
LT
7413
7414 if (found)
3e7ebdfa 7415 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7416 }
7417
7418 list_for_each_entry_safe(res, temp, &old_res, queue) {
7419 if (res->sdev) {
7420 res->del_from_ml = 1;
3e7ebdfa 7421 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7422 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7423 }
7424 }
7425
3e7ebdfa
WB
7426 list_for_each_entry_safe(res, temp, &old_res, queue) {
7427 ipr_clear_res_target(res);
7428 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7429 }
7430
ac09c349
BK
7431 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7432 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7433 else
7434 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7435
7436 LEAVE;
7437 return IPR_RC_JOB_CONTINUE;
7438}
7439
7440/**
7441 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7442 * @ipr_cmd: ipr command struct
7443 *
7444 * This function sends a Query IOA Configuration command
7445 * to the adapter to retrieve the IOA configuration table.
7446 *
7447 * Return value:
7448 * IPR_RC_JOB_RETURN
7449 **/
7450static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7451{
7452 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7453 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7454 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7455 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7456
7457 ENTER;
ac09c349
BK
7458 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7459 ioa_cfg->dual_raid = 1;
1da177e4
LT
7460 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7461 ucode_vpd->major_release, ucode_vpd->card_type,
7462 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7463 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7464 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7465
7466 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7467 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7468 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7469 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7470
3e7ebdfa 7471 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7472 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7473
7474 ipr_cmd->job_step = ipr_init_res_table;
7475
7476 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7477
7478 LEAVE;
7479 return IPR_RC_JOB_RETURN;
7480}
7481
7482/**
7483 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7484 * @ipr_cmd: ipr command struct
7485 *
7486 * This utility function sends an inquiry to the adapter.
7487 *
7488 * Return value:
7489 * none
7490 **/
7491static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7492 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
7493{
7494 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
7495
7496 ENTER;
7497 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7498 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7499
7500 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7501 ioarcb->cmd_pkt.cdb[1] = flags;
7502 ioarcb->cmd_pkt.cdb[2] = page;
7503 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7504
a32c055f 7505 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7506
7507 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7508 LEAVE;
7509}
7510
62275040
BK
7511/**
7512 * ipr_inquiry_page_supported - Is the given inquiry page supported
7513 * @page0: inquiry page 0 buffer
7514 * @page: page code.
7515 *
7516 * This function determines if the specified inquiry page is supported.
7517 *
7518 * Return value:
7519 * 1 if page is supported / 0 if not
7520 **/
7521static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7522{
7523 int i;
7524
7525 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7526 if (page0->page[i] == page)
7527 return 1;
7528
7529 return 0;
7530}
7531
ac09c349
BK
7532/**
7533 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7534 * @ipr_cmd: ipr command struct
7535 *
7536 * This function sends a Page 0xD0 inquiry to the adapter
7537 * to retrieve adapter capabilities.
7538 *
7539 * Return value:
7540 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7541 **/
7542static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7543{
7544 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7545 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7546 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7547
7548 ENTER;
7549 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7550 memset(cap, 0, sizeof(*cap));
7551
7552 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7553 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7554 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7555 sizeof(struct ipr_inquiry_cap));
7556 return IPR_RC_JOB_RETURN;
7557 }
7558
7559 LEAVE;
7560 return IPR_RC_JOB_CONTINUE;
7561}
7562
1da177e4
LT
7563/**
7564 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7565 * @ipr_cmd: ipr command struct
7566 *
7567 * This function sends a Page 3 inquiry to the adapter
7568 * to retrieve software VPD information.
7569 *
7570 * Return value:
7571 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7572 **/
7573static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
7574{
7575 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
7576
7577 ENTER;
7578
ac09c349 7579 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
7580
7581 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7582 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7583 sizeof(struct ipr_inquiry_page3));
7584
7585 LEAVE;
7586 return IPR_RC_JOB_RETURN;
7587}
7588
7589/**
7590 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7591 * @ipr_cmd: ipr command struct
7592 *
7593 * This function sends a Page 0 inquiry to the adapter
7594 * to retrieve supported inquiry pages.
7595 *
7596 * Return value:
7597 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7598 **/
7599static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7600{
7601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7602 char type[5];
7603
7604 ENTER;
7605
7606 /* Grab the type out of the VPD and store it away */
7607 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7608 type[4] = '\0';
7609 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7610
f688f96d
BK
7611 if (ipr_invalid_adapter(ioa_cfg)) {
7612 dev_err(&ioa_cfg->pdev->dev,
7613 "Adapter not supported in this hardware configuration.\n");
7614
7615 if (!ipr_testmode) {
7616 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7617 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7618 list_add_tail(&ipr_cmd->queue,
7619 &ioa_cfg->hrrq->hrrq_free_q);
7620 return IPR_RC_JOB_RETURN;
7621 }
7622 }
7623
62275040 7624 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 7625
62275040
BK
7626 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7627 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7628 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
7629
7630 LEAVE;
7631 return IPR_RC_JOB_RETURN;
7632}
7633
7634/**
7635 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7636 * @ipr_cmd: ipr command struct
7637 *
7638 * This function sends a standard inquiry to the adapter.
7639 *
7640 * Return value:
7641 * IPR_RC_JOB_RETURN
7642 **/
7643static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7644{
7645 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7646
7647 ENTER;
62275040 7648 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
7649
7650 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7651 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7652 sizeof(struct ipr_ioa_vpd));
7653
7654 LEAVE;
7655 return IPR_RC_JOB_RETURN;
7656}
7657
7658/**
214777ba 7659 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
7660 * @ipr_cmd: ipr command struct
7661 *
7662 * This function send an Identify Host Request Response Queue
7663 * command to establish the HRRQ with the adapter.
7664 *
7665 * Return value:
7666 * IPR_RC_JOB_RETURN
7667 **/
214777ba 7668static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7669{
7670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7671 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 7672 struct ipr_hrr_queue *hrrq;
1da177e4
LT
7673
7674 ENTER;
05a6538a 7675 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
1da177e4
LT
7676 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7677
56d6aa33 7678 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7679 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 7680
05a6538a 7681 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7682 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 7683
05a6538a 7684 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7685 if (ioa_cfg->sis64)
7686 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 7687
05a6538a 7688 if (ioa_cfg->nvectors == 1)
7689 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7690 else
7691 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7692
7693 ioarcb->cmd_pkt.cdb[2] =
7694 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7695 ioarcb->cmd_pkt.cdb[3] =
7696 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7697 ioarcb->cmd_pkt.cdb[4] =
7698 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7699 ioarcb->cmd_pkt.cdb[5] =
7700 ((u64) hrrq->host_rrq_dma) & 0xff;
7701 ioarcb->cmd_pkt.cdb[7] =
7702 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7703 ioarcb->cmd_pkt.cdb[8] =
7704 (sizeof(u32) * hrrq->size) & 0xff;
7705
7706 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7707 ioarcb->cmd_pkt.cdb[9] =
7708 ioa_cfg->identify_hrrq_index;
1da177e4 7709
05a6538a 7710 if (ioa_cfg->sis64) {
7711 ioarcb->cmd_pkt.cdb[10] =
7712 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7713 ioarcb->cmd_pkt.cdb[11] =
7714 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7715 ioarcb->cmd_pkt.cdb[12] =
7716 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7717 ioarcb->cmd_pkt.cdb[13] =
7718 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7719 }
7720
7721 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 7722 ioarcb->cmd_pkt.cdb[14] =
7723 ioa_cfg->identify_hrrq_index;
05a6538a 7724
7725 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7726 IPR_INTERNAL_TIMEOUT);
7727
56d6aa33 7728 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7729 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 7730
7731 LEAVE;
7732 return IPR_RC_JOB_RETURN;
05a6538a 7733 }
7734
1da177e4 7735 LEAVE;
05a6538a 7736 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
7737}
7738
7739/**
7740 * ipr_reset_timer_done - Adapter reset timer function
7741 * @ipr_cmd: ipr command struct
7742 *
7743 * Description: This function is used in adapter reset processing
7744 * for timing events. If the reset_cmd pointer in the IOA
7745 * config struct is not this adapter's we are doing nested
7746 * resets and fail_all_ops will take care of freeing the
7747 * command block.
7748 *
7749 * Return value:
7750 * none
7751 **/
7752static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7753{
7754 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7755 unsigned long lock_flags = 0;
7756
7757 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7758
7759 if (ioa_cfg->reset_cmd == ipr_cmd) {
7760 list_del(&ipr_cmd->queue);
7761 ipr_cmd->done(ipr_cmd);
7762 }
7763
7764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7765}
7766
7767/**
7768 * ipr_reset_start_timer - Start a timer for adapter reset job
7769 * @ipr_cmd: ipr command struct
7770 * @timeout: timeout value
7771 *
7772 * Description: This function is used in adapter reset processing
7773 * for timing events. If the reset_cmd pointer in the IOA
7774 * config struct is not this adapter's we are doing nested
7775 * resets and fail_all_ops will take care of freeing the
7776 * command block.
7777 *
7778 * Return value:
7779 * none
7780 **/
7781static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7782 unsigned long timeout)
7783{
05a6538a 7784
7785 ENTER;
7786 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7787 ipr_cmd->done = ipr_reset_ioa_job;
7788
7789 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7790 ipr_cmd->timer.expires = jiffies + timeout;
7791 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7792 add_timer(&ipr_cmd->timer);
7793}
7794
7795/**
7796 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7797 * @ioa_cfg: ioa cfg struct
7798 *
7799 * Return value:
7800 * nothing
7801 **/
7802static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7803{
05a6538a 7804 struct ipr_hrr_queue *hrrq;
1da177e4 7805
05a6538a 7806 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 7807 spin_lock(&hrrq->_lock);
05a6538a 7808 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7809
7810 /* Initialize Host RRQ pointers */
7811 hrrq->hrrq_start = hrrq->host_rrq;
7812 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7813 hrrq->hrrq_curr = hrrq->hrrq_start;
7814 hrrq->toggle_bit = 1;
56d6aa33 7815 spin_unlock(&hrrq->_lock);
05a6538a 7816 }
56d6aa33 7817 wmb();
05a6538a 7818
56d6aa33 7819 ioa_cfg->identify_hrrq_index = 0;
7820 if (ioa_cfg->hrrq_num == 1)
7821 atomic_set(&ioa_cfg->hrrq_index, 0);
7822 else
7823 atomic_set(&ioa_cfg->hrrq_index, 1);
1da177e4
LT
7824
7825 /* Zero out config table */
3e7ebdfa 7826 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7827}
7828
214777ba
WB
7829/**
7830 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7831 * @ipr_cmd: ipr command struct
7832 *
7833 * Return value:
7834 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7835 **/
7836static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7837{
7838 unsigned long stage, stage_time;
7839 u32 feedback;
7840 volatile u32 int_reg;
7841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7842 u64 maskval = 0;
7843
7844 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7845 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7846 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7847
7848 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7849
7850 /* sanity check the stage_time value */
438b0331
WB
7851 if (stage_time == 0)
7852 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7853 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
7854 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7855 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7856 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7857
7858 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7859 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7860 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7861 stage_time = ioa_cfg->transop_timeout;
7862 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7863 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
1df79ca4
WB
7864 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7865 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7866 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7867 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7868 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7869 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7870 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7871 return IPR_RC_JOB_CONTINUE;
7872 }
214777ba
WB
7873 }
7874
7875 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7876 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7877 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7878 ipr_cmd->done = ipr_reset_ioa_job;
7879 add_timer(&ipr_cmd->timer);
05a6538a 7880
7881 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
214777ba
WB
7882
7883 return IPR_RC_JOB_RETURN;
7884}
7885
1da177e4
LT
7886/**
7887 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7888 * @ipr_cmd: ipr command struct
7889 *
7890 * This function reinitializes some control blocks and
7891 * enables destructive diagnostics on the adapter.
7892 *
7893 * Return value:
7894 * IPR_RC_JOB_RETURN
7895 **/
7896static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7897{
7898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7899 volatile u32 int_reg;
7be96900 7900 volatile u64 maskval;
56d6aa33 7901 int i;
1da177e4
LT
7902
7903 ENTER;
214777ba 7904 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7905 ipr_init_ioa_mem(ioa_cfg);
7906
56d6aa33 7907 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7908 spin_lock(&ioa_cfg->hrrq[i]._lock);
7909 ioa_cfg->hrrq[i].allow_interrupts = 1;
7910 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7911 }
7912 wmb();
8701f185
WB
7913 if (ioa_cfg->sis64) {
7914 /* Set the adapter to the correct endian mode. */
7915 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7916 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7917 }
7918
7be96900 7919 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
7920
7921 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7922 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 7923 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
7924 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7925 return IPR_RC_JOB_CONTINUE;
7926 }
7927
7928 /* Enable destructive diagnostics on IOA */
214777ba
WB
7929 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7930
7be96900
WB
7931 if (ioa_cfg->sis64) {
7932 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7933 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7934 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7935 } else
7936 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 7937
1da177e4
LT
7938 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7939
7940 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7941
214777ba
WB
7942 if (ioa_cfg->sis64) {
7943 ipr_cmd->job_step = ipr_reset_next_stage;
7944 return IPR_RC_JOB_CONTINUE;
7945 }
7946
1da177e4 7947 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 7948 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
7949 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7950 ipr_cmd->done = ipr_reset_ioa_job;
7951 add_timer(&ipr_cmd->timer);
05a6538a 7952 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
7953
7954 LEAVE;
7955 return IPR_RC_JOB_RETURN;
7956}
7957
7958/**
7959 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7960 * @ipr_cmd: ipr command struct
7961 *
7962 * This function is invoked when an adapter dump has run out
7963 * of processing time.
7964 *
7965 * Return value:
7966 * IPR_RC_JOB_CONTINUE
7967 **/
7968static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7969{
7970 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7971
7972 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696
BK
7973 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7974 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4
LT
7975 ioa_cfg->sdt_state = ABORT_DUMP;
7976
4c647e90 7977 ioa_cfg->dump_timeout = 1;
1da177e4
LT
7978 ipr_cmd->job_step = ipr_reset_alert;
7979
7980 return IPR_RC_JOB_CONTINUE;
7981}
7982
7983/**
7984 * ipr_unit_check_no_data - Log a unit check/no data error log
7985 * @ioa_cfg: ioa config struct
7986 *
7987 * Logs an error indicating the adapter unit checked, but for some
7988 * reason, we were unable to fetch the unit check buffer.
7989 *
7990 * Return value:
7991 * nothing
7992 **/
7993static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7994{
7995 ioa_cfg->errors_logged++;
7996 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7997}
7998
7999/**
8000 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8001 * @ioa_cfg: ioa config struct
8002 *
8003 * Fetches the unit check buffer from the adapter by clocking the data
8004 * through the mailbox register.
8005 *
8006 * Return value:
8007 * nothing
8008 **/
8009static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8010{
8011 unsigned long mailbox;
8012 struct ipr_hostrcb *hostrcb;
8013 struct ipr_uc_sdt sdt;
8014 int rc, length;
65f56475 8015 u32 ioasc;
1da177e4
LT
8016
8017 mailbox = readl(ioa_cfg->ioa_mailbox);
8018
dcbad00e 8019 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
8020 ipr_unit_check_no_data(ioa_cfg);
8021 return;
8022 }
8023
8024 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8025 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8026 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8027
dcbad00e
WB
8028 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8029 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8030 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
8031 ipr_unit_check_no_data(ioa_cfg);
8032 return;
8033 }
8034
8035 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
8036 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8037 length = be32_to_cpu(sdt.entry[0].end_token);
8038 else
8039 length = (be32_to_cpu(sdt.entry[0].end_token) -
8040 be32_to_cpu(sdt.entry[0].start_token)) &
8041 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
8042
8043 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8044 struct ipr_hostrcb, queue);
8045 list_del(&hostrcb->queue);
8046 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8047
8048 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8049 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
8050 (__be32 *)&hostrcb->hcam,
8051 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8052
65f56475 8053 if (!rc) {
1da177e4 8054 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8055 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
8056 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8057 ioa_cfg->sdt_state == GET_DUMP)
8058 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8059 } else
1da177e4
LT
8060 ipr_unit_check_no_data(ioa_cfg);
8061
8062 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8063}
8064
110def85
WB
8065/**
8066 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8067 * @ipr_cmd: ipr command struct
8068 *
8069 * Description: This function will call to get the unit check buffer.
8070 *
8071 * Return value:
8072 * IPR_RC_JOB_RETURN
8073 **/
8074static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8075{
8076 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8077
8078 ENTER;
8079 ioa_cfg->ioa_unit_checked = 0;
8080 ipr_get_unit_check_buffer(ioa_cfg);
8081 ipr_cmd->job_step = ipr_reset_alert;
8082 ipr_reset_start_timer(ipr_cmd, 0);
8083
8084 LEAVE;
8085 return IPR_RC_JOB_RETURN;
8086}
8087
1da177e4
LT
8088/**
8089 * ipr_reset_restore_cfg_space - Restore PCI config space.
8090 * @ipr_cmd: ipr command struct
8091 *
8092 * Description: This function restores the saved PCI config space of
8093 * the adapter, fails all outstanding ops back to the callers, and
8094 * fetches the dump/unit check if applicable to this reset.
8095 *
8096 * Return value:
8097 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8098 **/
8099static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8100{
8101 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
630ad831 8102 u32 int_reg;
1da177e4
LT
8103
8104 ENTER;
99c965dd 8105 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8106 pci_restore_state(ioa_cfg->pdev);
1da177e4
LT
8107
8108 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8109 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
8110 return IPR_RC_JOB_CONTINUE;
8111 }
8112
8113 ipr_fail_all_ops(ioa_cfg);
8114
8701f185
WB
8115 if (ioa_cfg->sis64) {
8116 /* Set the adapter to the correct endian mode. */
8117 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8118 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8119 }
8120
1da177e4 8121 if (ioa_cfg->ioa_unit_checked) {
110def85
WB
8122 if (ioa_cfg->sis64) {
8123 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8124 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8125 return IPR_RC_JOB_RETURN;
8126 } else {
8127 ioa_cfg->ioa_unit_checked = 0;
8128 ipr_get_unit_check_buffer(ioa_cfg);
8129 ipr_cmd->job_step = ipr_reset_alert;
8130 ipr_reset_start_timer(ipr_cmd, 0);
8131 return IPR_RC_JOB_RETURN;
8132 }
1da177e4
LT
8133 }
8134
8135 if (ioa_cfg->in_ioa_bringdown) {
8136 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8137 } else {
8138 ipr_cmd->job_step = ipr_reset_enable_ioa;
8139
8140 if (GET_DUMP == ioa_cfg->sdt_state) {
41e9a696 8141 ioa_cfg->sdt_state = READ_DUMP;
4c647e90 8142 ioa_cfg->dump_timeout = 0;
4d4dd706
KSS
8143 if (ioa_cfg->sis64)
8144 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8145 else
8146 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
1da177e4
LT
8147 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8148 schedule_work(&ioa_cfg->work_q);
8149 return IPR_RC_JOB_RETURN;
8150 }
8151 }
8152
438b0331 8153 LEAVE;
1da177e4
LT
8154 return IPR_RC_JOB_CONTINUE;
8155}
8156
e619e1a7
BK
8157/**
8158 * ipr_reset_bist_done - BIST has completed on the adapter.
8159 * @ipr_cmd: ipr command struct
8160 *
8161 * Description: Unblock config space and resume the reset process.
8162 *
8163 * Return value:
8164 * IPR_RC_JOB_CONTINUE
8165 **/
8166static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8167{
fb51ccbf
JK
8168 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8169
e619e1a7 8170 ENTER;
fb51ccbf
JK
8171 if (ioa_cfg->cfg_locked)
8172 pci_cfg_access_unlock(ioa_cfg->pdev);
8173 ioa_cfg->cfg_locked = 0;
e619e1a7
BK
8174 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8175 LEAVE;
8176 return IPR_RC_JOB_CONTINUE;
8177}
8178
1da177e4
LT
8179/**
8180 * ipr_reset_start_bist - Run BIST on the adapter.
8181 * @ipr_cmd: ipr command struct
8182 *
8183 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8184 *
8185 * Return value:
8186 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8187 **/
8188static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8189{
8190 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8191 int rc = PCIBIOS_SUCCESSFUL;
1da177e4
LT
8192
8193 ENTER;
cb237ef7
WB
8194 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8195 writel(IPR_UPROCI_SIS64_START_BIST,
8196 ioa_cfg->regs.set_uproc_interrupt_reg32);
8197 else
8198 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8199
8200 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8201 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
8202 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8203 rc = IPR_RC_JOB_RETURN;
cb237ef7 8204 } else {
fb51ccbf
JK
8205 if (ioa_cfg->cfg_locked)
8206 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8207 ioa_cfg->cfg_locked = 0;
cb237ef7
WB
8208 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8209 rc = IPR_RC_JOB_CONTINUE;
1da177e4
LT
8210 }
8211
8212 LEAVE;
8213 return rc;
8214}
8215
463fc696
BK
8216/**
8217 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8218 * @ipr_cmd: ipr command struct
8219 *
8220 * Description: This clears PCI reset to the adapter and delays two seconds.
8221 *
8222 * Return value:
8223 * IPR_RC_JOB_RETURN
8224 **/
8225static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8226{
8227 ENTER;
8228 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8229 ipr_cmd->job_step = ipr_reset_bist_done;
8230 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8231 LEAVE;
8232 return IPR_RC_JOB_RETURN;
8233}
8234
8235/**
8236 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8237 * @ipr_cmd: ipr command struct
8238 *
8239 * Description: This asserts PCI reset to the adapter.
8240 *
8241 * Return value:
8242 * IPR_RC_JOB_RETURN
8243 **/
8244static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8245{
8246 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8247 struct pci_dev *pdev = ioa_cfg->pdev;
8248
8249 ENTER;
463fc696
BK
8250 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8251 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8252 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8253 LEAVE;
8254 return IPR_RC_JOB_RETURN;
8255}
8256
fb51ccbf
JK
8257/**
8258 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8259 * @ipr_cmd: ipr command struct
8260 *
8261 * Description: This attempts to block config access to the IOA.
8262 *
8263 * Return value:
8264 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8265 **/
8266static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8267{
8268 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8269 int rc = IPR_RC_JOB_CONTINUE;
8270
8271 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8272 ioa_cfg->cfg_locked = 1;
8273 ipr_cmd->job_step = ioa_cfg->reset;
8274 } else {
8275 if (ipr_cmd->u.time_left) {
8276 rc = IPR_RC_JOB_RETURN;
8277 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8278 ipr_reset_start_timer(ipr_cmd,
8279 IPR_CHECK_FOR_RESET_TIMEOUT);
8280 } else {
8281 ipr_cmd->job_step = ioa_cfg->reset;
8282 dev_err(&ioa_cfg->pdev->dev,
8283 "Timed out waiting to lock config access. Resetting anyway.\n");
8284 }
8285 }
8286
8287 return rc;
8288}
8289
8290/**
8291 * ipr_reset_block_config_access - Block config access to the IOA
8292 * @ipr_cmd: ipr command struct
8293 *
8294 * Description: This attempts to block config access to the IOA
8295 *
8296 * Return value:
8297 * IPR_RC_JOB_CONTINUE
8298 **/
8299static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8300{
8301 ipr_cmd->ioa_cfg->cfg_locked = 0;
8302 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8303 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8304 return IPR_RC_JOB_CONTINUE;
8305}
8306
1da177e4
LT
8307/**
8308 * ipr_reset_allowed - Query whether or not IOA can be reset
8309 * @ioa_cfg: ioa config struct
8310 *
8311 * Return value:
8312 * 0 if reset not allowed / non-zero if reset is allowed
8313 **/
8314static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8315{
8316 volatile u32 temp_reg;
8317
8318 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8319 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8320}
8321
8322/**
8323 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8324 * @ipr_cmd: ipr command struct
8325 *
8326 * Description: This function waits for adapter permission to run BIST,
8327 * then runs BIST. If the adapter does not give permission after a
8328 * reasonable time, we will reset the adapter anyway. The impact of
8329 * resetting the adapter without warning the adapter is the risk of
8330 * losing the persistent error log on the adapter. If the adapter is
8331 * reset while it is writing to the flash on the adapter, the flash
8332 * segment will have bad ECC and be zeroed.
8333 *
8334 * Return value:
8335 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8336 **/
8337static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8338{
8339 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8340 int rc = IPR_RC_JOB_RETURN;
8341
8342 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8343 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8344 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8345 } else {
fb51ccbf 8346 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8347 rc = IPR_RC_JOB_CONTINUE;
8348 }
8349
8350 return rc;
8351}
8352
8353/**
8701f185 8354 * ipr_reset_alert - Alert the adapter of a pending reset
1da177e4
LT
8355 * @ipr_cmd: ipr command struct
8356 *
8357 * Description: This function alerts the adapter that it will be reset.
8358 * If memory space is not currently enabled, proceed directly
8359 * to running BIST on the adapter. The timer must always be started
8360 * so we guarantee we do not run BIST from ipr_isr.
8361 *
8362 * Return value:
8363 * IPR_RC_JOB_RETURN
8364 **/
8365static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8366{
8367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8368 u16 cmd_reg;
8369 int rc;
8370
8371 ENTER;
8372 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8373
8374 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8375 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8376 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
8377 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8378 } else {
fb51ccbf 8379 ipr_cmd->job_step = ipr_reset_block_config_access;
1da177e4
LT
8380 }
8381
8382 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8383 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8384
8385 LEAVE;
8386 return IPR_RC_JOB_RETURN;
8387}
8388
8389/**
8390 * ipr_reset_ucode_download_done - Microcode download completion
8391 * @ipr_cmd: ipr command struct
8392 *
8393 * Description: This function unmaps the microcode download buffer.
8394 *
8395 * Return value:
8396 * IPR_RC_JOB_CONTINUE
8397 **/
8398static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8399{
8400 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8401 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8402
d73341bf 8403 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
1da177e4
LT
8404 sglist->num_sg, DMA_TO_DEVICE);
8405
8406 ipr_cmd->job_step = ipr_reset_alert;
8407 return IPR_RC_JOB_CONTINUE;
8408}
8409
8410/**
8411 * ipr_reset_ucode_download - Download microcode to the adapter
8412 * @ipr_cmd: ipr command struct
8413 *
8414 * Description: This function checks to see if it there is microcode
8415 * to download to the adapter. If there is, a download is performed.
8416 *
8417 * Return value:
8418 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8419 **/
8420static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8421{
8422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8423 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8424
8425 ENTER;
8426 ipr_cmd->job_step = ipr_reset_alert;
8427
8428 if (!sglist)
8429 return IPR_RC_JOB_CONTINUE;
8430
8431 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8432 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8433 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8434 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8435 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8436 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8437 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8438
a32c055f
WB
8439 if (ioa_cfg->sis64)
8440 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8441 else
8442 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
8443 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8444
8445 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8446 IPR_WRITE_BUFFER_TIMEOUT);
8447
8448 LEAVE;
8449 return IPR_RC_JOB_RETURN;
8450}
8451
8452/**
8453 * ipr_reset_shutdown_ioa - Shutdown the adapter
8454 * @ipr_cmd: ipr command struct
8455 *
8456 * Description: This function issues an adapter shutdown of the
8457 * specified type to the specified adapter as part of the
8458 * adapter reset job.
8459 *
8460 * Return value:
8461 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8462 **/
8463static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8464{
8465 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8466 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8467 unsigned long timeout;
8468 int rc = IPR_RC_JOB_CONTINUE;
8469
8470 ENTER;
56d6aa33 8471 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8472 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
1da177e4
LT
8473 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8474 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8475 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8476 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8477
ac09c349
BK
8478 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8479 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
8480 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8481 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
8482 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8483 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 8484 else
ac09c349 8485 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
8486
8487 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8488
8489 rc = IPR_RC_JOB_RETURN;
8490 ipr_cmd->job_step = ipr_reset_ucode_download;
8491 } else
8492 ipr_cmd->job_step = ipr_reset_alert;
8493
8494 LEAVE;
8495 return rc;
8496}
8497
8498/**
8499 * ipr_reset_ioa_job - Adapter reset job
8500 * @ipr_cmd: ipr command struct
8501 *
8502 * Description: This function is the job router for the adapter reset job.
8503 *
8504 * Return value:
8505 * none
8506 **/
8507static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8508{
8509 u32 rc, ioasc;
1da177e4
LT
8510 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8511
8512 do {
96d21f00 8513 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
8514
8515 if (ioa_cfg->reset_cmd != ipr_cmd) {
8516 /*
8517 * We are doing nested adapter resets and this is
8518 * not the current reset job.
8519 */
05a6538a 8520 list_add_tail(&ipr_cmd->queue,
8521 &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
8522 return;
8523 }
8524
8525 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
8526 rc = ipr_cmd->job_step_failed(ipr_cmd);
8527 if (rc == IPR_RC_JOB_RETURN)
8528 return;
1da177e4
LT
8529 }
8530
8531 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 8532 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 8533 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 8534 } while (rc == IPR_RC_JOB_CONTINUE);
1da177e4
LT
8535}
8536
8537/**
8538 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8539 * @ioa_cfg: ioa config struct
8540 * @job_step: first job step of reset job
8541 * @shutdown_type: shutdown type
8542 *
8543 * Description: This function will initiate the reset of the given adapter
8544 * starting at the selected job step.
8545 * If the caller needs to wait on the completion of the reset,
8546 * the caller must sleep on the reset_wait_q.
8547 *
8548 * Return value:
8549 * none
8550 **/
8551static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8552 int (*job_step) (struct ipr_cmnd *),
8553 enum ipr_shutdown_type shutdown_type)
8554{
8555 struct ipr_cmnd *ipr_cmd;
56d6aa33 8556 int i;
1da177e4
LT
8557
8558 ioa_cfg->in_reset_reload = 1;
56d6aa33 8559 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8560 spin_lock(&ioa_cfg->hrrq[i]._lock);
8561 ioa_cfg->hrrq[i].allow_cmds = 0;
8562 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8563 }
8564 wmb();
bfae7820
BK
8565 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8566 scsi_block_requests(ioa_cfg->host);
1da177e4
LT
8567
8568 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8569 ioa_cfg->reset_cmd = ipr_cmd;
8570 ipr_cmd->job_step = job_step;
8571 ipr_cmd->u.shutdown_type = shutdown_type;
8572
8573 ipr_reset_ioa_job(ipr_cmd);
8574}
8575
8576/**
8577 * ipr_initiate_ioa_reset - Initiate an adapter reset
8578 * @ioa_cfg: ioa config struct
8579 * @shutdown_type: shutdown type
8580 *
8581 * Description: This function will initiate the reset of the given adapter.
8582 * If the caller needs to wait on the completion of the reset,
8583 * the caller must sleep on the reset_wait_q.
8584 *
8585 * Return value:
8586 * none
8587 **/
8588static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8589 enum ipr_shutdown_type shutdown_type)
8590{
56d6aa33 8591 int i;
8592
8593 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
8594 return;
8595
41e9a696
BK
8596 if (ioa_cfg->in_reset_reload) {
8597 if (ioa_cfg->sdt_state == GET_DUMP)
8598 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8599 else if (ioa_cfg->sdt_state == READ_DUMP)
8600 ioa_cfg->sdt_state = ABORT_DUMP;
8601 }
1da177e4
LT
8602
8603 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8604 dev_err(&ioa_cfg->pdev->dev,
8605 "IOA taken offline - error recovery failed\n");
8606
8607 ioa_cfg->reset_retries = 0;
56d6aa33 8608 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8609 spin_lock(&ioa_cfg->hrrq[i]._lock);
8610 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8611 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8612 }
8613 wmb();
1da177e4
LT
8614
8615 if (ioa_cfg->in_ioa_bringdown) {
8616 ioa_cfg->reset_cmd = NULL;
8617 ioa_cfg->in_reset_reload = 0;
8618 ipr_fail_all_ops(ioa_cfg);
8619 wake_up_all(&ioa_cfg->reset_wait_q);
8620
bfae7820
BK
8621 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8622 spin_unlock_irq(ioa_cfg->host->host_lock);
8623 scsi_unblock_requests(ioa_cfg->host);
8624 spin_lock_irq(ioa_cfg->host->host_lock);
8625 }
1da177e4
LT
8626 return;
8627 } else {
8628 ioa_cfg->in_ioa_bringdown = 1;
8629 shutdown_type = IPR_SHUTDOWN_NONE;
8630 }
8631 }
8632
8633 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8634 shutdown_type);
8635}
8636
f8a88b19
LV
8637/**
8638 * ipr_reset_freeze - Hold off all I/O activity
8639 * @ipr_cmd: ipr command struct
8640 *
8641 * Description: If the PCI slot is frozen, hold off all I/O
8642 * activity; then, as soon as the slot is available again,
8643 * initiate an adapter reset.
8644 */
8645static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8646{
56d6aa33 8647 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8648 int i;
8649
f8a88b19 8650 /* Disallow new interrupts, avoid loop */
56d6aa33 8651 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8652 spin_lock(&ioa_cfg->hrrq[i]._lock);
8653 ioa_cfg->hrrq[i].allow_interrupts = 0;
8654 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8655 }
8656 wmb();
05a6538a 8657 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
f8a88b19
LV
8658 ipr_cmd->done = ipr_reset_ioa_job;
8659 return IPR_RC_JOB_RETURN;
8660}
8661
6270e593
BK
8662/**
8663 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8664 * @pdev: PCI device struct
8665 *
8666 * Description: This routine is called to tell us that the MMIO
8667 * access to the IOA has been restored
8668 */
8669static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8670{
8671 unsigned long flags = 0;
8672 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8673
8674 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8675 if (!ioa_cfg->probe_done)
8676 pci_save_state(pdev);
8677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8678 return PCI_ERS_RESULT_NEED_RESET;
8679}
8680
f8a88b19
LV
8681/**
8682 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8683 * @pdev: PCI device struct
8684 *
8685 * Description: This routine is called to tell us that the PCI bus
8686 * is down. Can't do anything here, except put the device driver
8687 * into a holding pattern, waiting for the PCI bus to come back.
8688 */
8689static void ipr_pci_frozen(struct pci_dev *pdev)
8690{
8691 unsigned long flags = 0;
8692 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8693
8694 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8695 if (ioa_cfg->probe_done)
8696 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
f8a88b19
LV
8697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8698}
8699
8700/**
8701 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8702 * @pdev: PCI device struct
8703 *
8704 * Description: This routine is called by the pci error recovery
8705 * code after the PCI slot has been reset, just before we
8706 * should resume normal operations.
8707 */
8708static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8709{
8710 unsigned long flags = 0;
8711 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8712
8713 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8714 if (ioa_cfg->probe_done) {
8715 if (ioa_cfg->needs_warm_reset)
8716 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8717 else
8718 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8719 IPR_SHUTDOWN_NONE);
8720 } else
8721 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
8722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8723 return PCI_ERS_RESULT_RECOVERED;
8724}
8725
8726/**
8727 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8728 * @pdev: PCI device struct
8729 *
8730 * Description: This routine is called when the PCI bus has
8731 * permanently failed.
8732 */
8733static void ipr_pci_perm_failure(struct pci_dev *pdev)
8734{
8735 unsigned long flags = 0;
8736 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 8737 int i;
f8a88b19
LV
8738
8739 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6270e593
BK
8740 if (ioa_cfg->probe_done) {
8741 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8742 ioa_cfg->sdt_state = ABORT_DUMP;
8743 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8744 ioa_cfg->in_ioa_bringdown = 1;
8745 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8746 spin_lock(&ioa_cfg->hrrq[i]._lock);
8747 ioa_cfg->hrrq[i].allow_cmds = 0;
8748 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8749 }
8750 wmb();
8751 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8752 } else
8753 wake_up_all(&ioa_cfg->eeh_wait_q);
f8a88b19
LV
8754 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8755}
8756
8757/**
8758 * ipr_pci_error_detected - Called when a PCI error is detected.
8759 * @pdev: PCI device struct
8760 * @state: PCI channel state
8761 *
8762 * Description: Called when a PCI error is detected.
8763 *
8764 * Return value:
8765 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8766 */
8767static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8768 pci_channel_state_t state)
8769{
8770 switch (state) {
8771 case pci_channel_io_frozen:
8772 ipr_pci_frozen(pdev);
6270e593 8773 return PCI_ERS_RESULT_CAN_RECOVER;
f8a88b19
LV
8774 case pci_channel_io_perm_failure:
8775 ipr_pci_perm_failure(pdev);
8776 return PCI_ERS_RESULT_DISCONNECT;
8777 break;
8778 default:
8779 break;
8780 }
8781 return PCI_ERS_RESULT_NEED_RESET;
8782}
8783
1da177e4
LT
8784/**
8785 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8786 * @ioa_cfg: ioa cfg struct
8787 *
8788 * Description: This is the second phase of adapter intialization
8789 * This function takes care of initilizing the adapter to the point
8790 * where it can accept new commands.
8791
8792 * Return value:
b1c11812 8793 * 0 on success / -EIO on failure
1da177e4 8794 **/
6f039790 8795static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
8796{
8797 int rc = 0;
8798 unsigned long host_lock_flags = 0;
8799
8800 ENTER;
8801 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8802 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6270e593 8803 ioa_cfg->probe_done = 1;
ce155cce
BK
8804 if (ioa_cfg->needs_hard_reset) {
8805 ioa_cfg->needs_hard_reset = 0;
8806 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8807 } else
8808 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8809 IPR_SHUTDOWN_NONE);
1da177e4 8810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
1da177e4
LT
8811
8812 LEAVE;
8813 return rc;
8814}
8815
8816/**
8817 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8818 * @ioa_cfg: ioa config struct
8819 *
8820 * Return value:
8821 * none
8822 **/
8823static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8824{
8825 int i;
8826
8827 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8828 if (ioa_cfg->ipr_cmnd_list[i])
d73341bf 8829 dma_pool_free(ioa_cfg->ipr_cmd_pool,
1da177e4
LT
8830 ioa_cfg->ipr_cmnd_list[i],
8831 ioa_cfg->ipr_cmnd_list_dma[i]);
8832
8833 ioa_cfg->ipr_cmnd_list[i] = NULL;
8834 }
8835
8836 if (ioa_cfg->ipr_cmd_pool)
d73341bf 8837 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 8838
89aad428
BK
8839 kfree(ioa_cfg->ipr_cmnd_list);
8840 kfree(ioa_cfg->ipr_cmnd_list_dma);
8841 ioa_cfg->ipr_cmnd_list = NULL;
8842 ioa_cfg->ipr_cmnd_list_dma = NULL;
1da177e4
LT
8843 ioa_cfg->ipr_cmd_pool = NULL;
8844}
8845
8846/**
8847 * ipr_free_mem - Frees memory allocated for an adapter
8848 * @ioa_cfg: ioa cfg struct
8849 *
8850 * Return value:
8851 * nothing
8852 **/
8853static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8854{
8855 int i;
8856
8857 kfree(ioa_cfg->res_entries);
d73341bf
AB
8858 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8859 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4 8860 ipr_free_cmd_blks(ioa_cfg);
05a6538a 8861
8862 for (i = 0; i < ioa_cfg->hrrq_num; i++)
d73341bf
AB
8863 dma_free_coherent(&ioa_cfg->pdev->dev,
8864 sizeof(u32) * ioa_cfg->hrrq[i].size,
8865 ioa_cfg->hrrq[i].host_rrq,
8866 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 8867
d73341bf
AB
8868 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8869 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4
LT
8870
8871 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
8872 dma_free_coherent(&ioa_cfg->pdev->dev,
8873 sizeof(struct ipr_hostrcb),
8874 ioa_cfg->hostrcb[i],
8875 ioa_cfg->hostrcb_dma[i]);
1da177e4
LT
8876 }
8877
8878 ipr_free_dump(ioa_cfg);
1da177e4
LT
8879 kfree(ioa_cfg->trace);
8880}
8881
8882/**
8883 * ipr_free_all_resources - Free all allocated resources for an adapter.
8884 * @ipr_cmd: ipr command struct
8885 *
8886 * This function frees all allocated resources for the
8887 * specified adapter.
8888 *
8889 * Return value:
8890 * none
8891 **/
8892static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8893{
8894 struct pci_dev *pdev = ioa_cfg->pdev;
8895
8896 ENTER;
05a6538a 8897 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8898 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8899 int i;
8900 for (i = 0; i < ioa_cfg->nvectors; i++)
8901 free_irq(ioa_cfg->vectors_info[i].vec,
8902 &ioa_cfg->hrrq[i]);
8903 } else
8904 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8905
56d6aa33 8906 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
05a6538a 8907 pci_disable_msi(pdev);
56d6aa33 8908 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8909 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
05a6538a 8910 pci_disable_msix(pdev);
56d6aa33 8911 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8912 }
05a6538a 8913
1da177e4
LT
8914 iounmap(ioa_cfg->hdw_dma_regs);
8915 pci_release_regions(pdev);
8916 ipr_free_mem(ioa_cfg);
8917 scsi_host_put(ioa_cfg->host);
8918 pci_disable_device(pdev);
8919 LEAVE;
8920}
8921
8922/**
8923 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8924 * @ioa_cfg: ioa config struct
8925 *
8926 * Return value:
8927 * 0 on success / -ENOMEM on allocation failure
8928 **/
6f039790 8929static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
8930{
8931 struct ipr_cmnd *ipr_cmd;
8932 struct ipr_ioarcb *ioarcb;
8933 dma_addr_t dma_addr;
05a6538a 8934 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 8935
d73341bf 8936 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
203fa3fe 8937 sizeof(struct ipr_cmnd), 512, 0);
1da177e4
LT
8938
8939 if (!ioa_cfg->ipr_cmd_pool)
8940 return -ENOMEM;
8941
89aad428
BK
8942 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8943 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8944
8945 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8946 ipr_free_cmd_blks(ioa_cfg);
8947 return -ENOMEM;
8948 }
8949
05a6538a 8950 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8951 if (ioa_cfg->hrrq_num > 1) {
8952 if (i == 0) {
8953 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8954 ioa_cfg->hrrq[i].min_cmd_id = 0;
8955 ioa_cfg->hrrq[i].max_cmd_id =
8956 (entries_each_hrrq - 1);
8957 } else {
8958 entries_each_hrrq =
8959 IPR_NUM_BASE_CMD_BLKS/
8960 (ioa_cfg->hrrq_num - 1);
8961 ioa_cfg->hrrq[i].min_cmd_id =
8962 IPR_NUM_INTERNAL_CMD_BLKS +
8963 (i - 1) * entries_each_hrrq;
8964 ioa_cfg->hrrq[i].max_cmd_id =
8965 (IPR_NUM_INTERNAL_CMD_BLKS +
8966 i * entries_each_hrrq - 1);
8967 }
8968 } else {
8969 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8970 ioa_cfg->hrrq[i].min_cmd_id = 0;
8971 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8972 }
8973 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8974 }
8975
8976 BUG_ON(ioa_cfg->hrrq_num == 0);
8977
8978 i = IPR_NUM_CMD_BLKS -
8979 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8980 if (i > 0) {
8981 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8982 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8983 }
8984
1da177e4 8985 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
d73341bf 8986 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
8987
8988 if (!ipr_cmd) {
8989 ipr_free_cmd_blks(ioa_cfg);
8990 return -ENOMEM;
8991 }
8992
8993 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8994 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8995 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8996
8997 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
8998 ipr_cmd->dma_addr = dma_addr;
8999 if (ioa_cfg->sis64)
9000 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9001 else
9002 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9003
1da177e4 9004 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
9005 if (ioa_cfg->sis64) {
9006 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9007 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9008 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9009 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
9010 } else {
9011 ioarcb->write_ioadl_addr =
9012 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9013 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9014 ioarcb->ioasa_host_pci_addr =
96d21f00 9015 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9016 }
1da177e4
LT
9017 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9018 ipr_cmd->cmd_index = i;
9019 ipr_cmd->ioa_cfg = ioa_cfg;
9020 ipr_cmd->sense_buffer_dma = dma_addr +
9021 offsetof(struct ipr_cmnd, sense_buffer);
9022
05a6538a 9023 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9024 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9025 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9026 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9027 hrrq_id++;
1da177e4
LT
9028 }
9029
9030 return 0;
9031}
9032
9033/**
9034 * ipr_alloc_mem - Allocate memory for an adapter
9035 * @ioa_cfg: ioa config struct
9036 *
9037 * Return value:
9038 * 0 on success / non-zero for error
9039 **/
6f039790 9040static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9041{
9042 struct pci_dev *pdev = ioa_cfg->pdev;
9043 int i, rc = -ENOMEM;
9044
9045 ENTER;
0bc42e35 9046 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 9047 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
9048
9049 if (!ioa_cfg->res_entries)
9050 goto out;
9051
3e7ebdfa 9052 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9053 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9054 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9055 }
1da177e4 9056
d73341bf
AB
9057 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9058 sizeof(struct ipr_misc_cbs),
9059 &ioa_cfg->vpd_cbs_dma,
9060 GFP_KERNEL);
1da177e4
LT
9061
9062 if (!ioa_cfg->vpd_cbs)
9063 goto out_free_res_entries;
9064
9065 if (ipr_alloc_cmd_blks(ioa_cfg))
9066 goto out_free_vpd_cbs;
9067
05a6538a 9068 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9069 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9070 sizeof(u32) * ioa_cfg->hrrq[i].size,
d73341bf
AB
9071 &ioa_cfg->hrrq[i].host_rrq_dma,
9072 GFP_KERNEL);
05a6538a 9073
9074 if (!ioa_cfg->hrrq[i].host_rrq) {
9075 while (--i > 0)
d73341bf 9076 dma_free_coherent(&pdev->dev,
05a6538a 9077 sizeof(u32) * ioa_cfg->hrrq[i].size,
9078 ioa_cfg->hrrq[i].host_rrq,
9079 ioa_cfg->hrrq[i].host_rrq_dma);
9080 goto out_ipr_free_cmd_blocks;
9081 }
9082 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9083 }
1da177e4 9084
d73341bf
AB
9085 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9086 ioa_cfg->cfg_table_size,
9087 &ioa_cfg->cfg_table_dma,
9088 GFP_KERNEL);
1da177e4 9089
3e7ebdfa 9090 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9091 goto out_free_host_rrq;
9092
9093 for (i = 0; i < IPR_NUM_HCAMS; i++) {
d73341bf
AB
9094 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9095 sizeof(struct ipr_hostrcb),
9096 &ioa_cfg->hostrcb_dma[i],
9097 GFP_KERNEL);
1da177e4
LT
9098
9099 if (!ioa_cfg->hostrcb[i])
9100 goto out_free_hostrcb_dma;
9101
9102 ioa_cfg->hostrcb[i]->hostrcb_dma =
9103 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9104 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9105 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9106 }
9107
0bc42e35 9108 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
9109 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9110
9111 if (!ioa_cfg->trace)
9112 goto out_free_hostrcb_dma;
9113
1da177e4
LT
9114 rc = 0;
9115out:
9116 LEAVE;
9117 return rc;
9118
9119out_free_hostrcb_dma:
9120 while (i-- > 0) {
d73341bf
AB
9121 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9122 ioa_cfg->hostrcb[i],
9123 ioa_cfg->hostrcb_dma[i]);
1da177e4 9124 }
d73341bf
AB
9125 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9126 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9127out_free_host_rrq:
05a6538a 9128 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf
AB
9129 dma_free_coherent(&pdev->dev,
9130 sizeof(u32) * ioa_cfg->hrrq[i].size,
9131 ioa_cfg->hrrq[i].host_rrq,
9132 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9133 }
1da177e4
LT
9134out_ipr_free_cmd_blocks:
9135 ipr_free_cmd_blks(ioa_cfg);
9136out_free_vpd_cbs:
d73341bf
AB
9137 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9138 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4
LT
9139out_free_res_entries:
9140 kfree(ioa_cfg->res_entries);
9141 goto out;
9142}
9143
9144/**
9145 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9146 * @ioa_cfg: ioa config struct
9147 *
9148 * Return value:
9149 * none
9150 **/
6f039790 9151static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9152{
9153 int i;
9154
9155 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9156 ioa_cfg->bus_attr[i].bus = i;
9157 ioa_cfg->bus_attr[i].qas_enabled = 0;
9158 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9159 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9160 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9161 else
9162 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9163 }
9164}
9165
6270e593
BK
9166/**
9167 * ipr_init_regs - Initialize IOA registers
9168 * @ioa_cfg: ioa config struct
9169 *
9170 * Return value:
9171 * none
9172 **/
9173static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9174{
9175 const struct ipr_interrupt_offsets *p;
9176 struct ipr_interrupts *t;
9177 void __iomem *base;
9178
9179 p = &ioa_cfg->chip_cfg->regs;
9180 t = &ioa_cfg->regs;
9181 base = ioa_cfg->hdw_dma_regs;
9182
9183 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9184 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9185 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9186 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9187 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9188 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9189 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9190 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9191 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9192 t->ioarrin_reg = base + p->ioarrin_reg;
9193 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9194 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9195 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9196 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9197 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9198 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9199
9200 if (ioa_cfg->sis64) {
9201 t->init_feedback_reg = base + p->init_feedback_reg;
9202 t->dump_addr_reg = base + p->dump_addr_reg;
9203 t->dump_data_reg = base + p->dump_data_reg;
9204 t->endian_swap_reg = base + p->endian_swap_reg;
9205 }
9206}
9207
1da177e4
LT
9208/**
9209 * ipr_init_ioa_cfg - Initialize IOA config struct
9210 * @ioa_cfg: ioa config struct
9211 * @host: scsi host struct
9212 * @pdev: PCI dev struct
9213 *
9214 * Return value:
9215 * none
9216 **/
6f039790
GKH
9217static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9218 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9219{
6270e593 9220 int i;
1da177e4
LT
9221
9222 ioa_cfg->host = host;
9223 ioa_cfg->pdev = pdev;
9224 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9225 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9226 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9227 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9228 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9229 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9230 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9231 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9232
1da177e4
LT
9233 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9234 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9235 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9236 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9237 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9238 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9239 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9240 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1da177e4
LT
9241 ioa_cfg->sdt_state = INACTIVE;
9242
9243 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9244 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9245
3e7ebdfa
WB
9246 if (ioa_cfg->sis64) {
9247 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9248 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9249 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9250 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
6270e593
BK
9251 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9252 + ((sizeof(struct ipr_config_table_entry64)
9253 * ioa_cfg->max_devs_supported)));
3e7ebdfa
WB
9254 } else {
9255 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9256 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9257 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9258 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
6270e593
BK
9259 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9260 + ((sizeof(struct ipr_config_table_entry)
9261 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9262 }
6270e593 9263
f688f96d 9264 host->max_channel = IPR_VSET_BUS;
1da177e4
LT
9265 host->unique_id = host->host_no;
9266 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9267 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9268 pci_set_drvdata(pdev, ioa_cfg);
9269
6270e593
BK
9270 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9271 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9272 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9273 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9274 if (i == 0)
9275 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9276 else
9277 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 9278 }
1da177e4
LT
9279}
9280
9281/**
1be7bd82 9282 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9283 * @dev_id: PCI device id struct
9284 *
9285 * Return value:
1be7bd82 9286 * ptr to chip information on success / NULL on failure
1da177e4 9287 **/
6f039790 9288static const struct ipr_chip_t *
1be7bd82 9289ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9290{
9291 int i;
9292
1da177e4
LT
9293 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9294 if (ipr_chip[i].vendor == dev_id->vendor &&
9295 ipr_chip[i].device == dev_id->device)
1be7bd82 9296 return &ipr_chip[i];
1da177e4
LT
9297 return NULL;
9298}
9299
6270e593
BK
9300/**
9301 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9302 * during probe time
9303 * @ioa_cfg: ioa config struct
9304 *
9305 * Return value:
9306 * None
9307 **/
9308static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9309{
9310 struct pci_dev *pdev = ioa_cfg->pdev;
9311
9312 if (pci_channel_offline(pdev)) {
9313 wait_event_timeout(ioa_cfg->eeh_wait_q,
9314 !pci_channel_offline(pdev),
9315 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9316 pci_restore_state(pdev);
9317 }
9318}
9319
05a6538a 9320static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9321{
9322 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
60e76b77 9323 int i, vectors;
05a6538a 9324
9325 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9326 entries[i].entry = i;
9327
60e76b77
AG
9328 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9329 entries, 1, ipr_number_of_msix);
9330 if (vectors < 0) {
6270e593 9331 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9332 return vectors;
05a6538a 9333 }
9334
60e76b77
AG
9335 for (i = 0; i < vectors; i++)
9336 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9337 ioa_cfg->nvectors = vectors;
05a6538a 9338
60e76b77 9339 return 0;
05a6538a 9340}
9341
9342static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9343{
60e76b77 9344 int i, vectors;
05a6538a 9345
60e76b77
AG
9346 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9347 if (vectors < 0) {
6270e593 9348 ipr_wait_for_pci_err_recovery(ioa_cfg);
60e76b77 9349 return vectors;
05a6538a 9350 }
9351
60e76b77
AG
9352 for (i = 0; i < vectors; i++)
9353 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9354 ioa_cfg->nvectors = vectors;
05a6538a 9355
60e76b77 9356 return 0;
05a6538a 9357}
9358
9359static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9360{
9361 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9362
9363 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9364 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9365 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9366 ioa_cfg->vectors_info[vec_idx].
9367 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9368 }
9369}
9370
9371static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9372{
9373 int i, rc;
9374
9375 for (i = 1; i < ioa_cfg->nvectors; i++) {
9376 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9377 ipr_isr_mhrrq,
9378 0,
9379 ioa_cfg->vectors_info[i].desc,
9380 &ioa_cfg->hrrq[i]);
9381 if (rc) {
9382 while (--i >= 0)
9383 free_irq(ioa_cfg->vectors_info[i].vec,
9384 &ioa_cfg->hrrq[i]);
9385 return rc;
9386 }
9387 }
9388 return 0;
9389}
9390
95fecd90
WB
9391/**
9392 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9393 * @pdev: PCI device struct
9394 *
9395 * Description: Simply set the msi_received flag to 1 indicating that
9396 * Message Signaled Interrupts are supported.
9397 *
9398 * Return value:
9399 * 0 on success / non-zero on failure
9400 **/
6f039790 9401static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
9402{
9403 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9404 unsigned long lock_flags = 0;
9405 irqreturn_t rc = IRQ_HANDLED;
9406
05a6538a 9407 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
9408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9409
9410 ioa_cfg->msi_received = 1;
9411 wake_up(&ioa_cfg->msi_wait_q);
9412
9413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9414 return rc;
9415}
9416
9417/**
9418 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9419 * @pdev: PCI device struct
9420 *
60e76b77 9421 * Description: The return value from pci_enable_msi_range() can not always be
95fecd90
WB
9422 * trusted. This routine sets up and initiates a test interrupt to determine
9423 * if the interrupt is received via the ipr_test_intr() service routine.
9424 * If the tests fails, the driver will fall back to LSI.
9425 *
9426 * Return value:
9427 * 0 on success / non-zero on failure
9428 **/
6f039790 9429static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
9430{
9431 int rc;
9432 volatile u32 int_reg;
9433 unsigned long lock_flags = 0;
9434
9435 ENTER;
9436
9437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9438 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9439 ioa_cfg->msi_received = 0;
9440 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 9441 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
9442 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9444
f19799f4 9445 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9446 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9447 else
9448 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
95fecd90
WB
9449 if (rc) {
9450 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9451 return rc;
9452 } else if (ipr_debug)
9453 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9454
214777ba 9455 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
9456 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9457 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 9458 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
9459 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9460
95fecd90
WB
9461 if (!ioa_cfg->msi_received) {
9462 /* MSI test failed */
9463 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9464 rc = -EOPNOTSUPP;
9465 } else if (ipr_debug)
9466 dev_info(&pdev->dev, "MSI test succeeded.\n");
9467
9468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9469
f19799f4 9470 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9471 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9472 else
9473 free_irq(pdev->irq, ioa_cfg);
95fecd90
WB
9474
9475 LEAVE;
9476
9477 return rc;
9478}
9479
05a6538a 9480 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
1da177e4
LT
9481 * @pdev: PCI device struct
9482 * @dev_id: PCI device id struct
9483 *
9484 * Return value:
9485 * 0 on success / non-zero on failure
9486 **/
6f039790
GKH
9487static int ipr_probe_ioa(struct pci_dev *pdev,
9488 const struct pci_device_id *dev_id)
1da177e4
LT
9489{
9490 struct ipr_ioa_cfg *ioa_cfg;
9491 struct Scsi_Host *host;
9492 unsigned long ipr_regs_pci;
9493 void __iomem *ipr_regs;
a2a65a3e 9494 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 9495 volatile u32 mask, uproc, interrupts;
feccada9 9496 unsigned long lock_flags, driver_lock_flags;
1da177e4
LT
9497
9498 ENTER;
9499
1da177e4 9500 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
1da177e4
LT
9501 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9502
9503 if (!host) {
9504 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9505 rc = -ENOMEM;
6270e593 9506 goto out;
1da177e4
LT
9507 }
9508
9509 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9510 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8d8e7d13 9511 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
1da177e4 9512
1be7bd82 9513 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 9514
1be7bd82 9515 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
9516 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9517 dev_id->vendor, dev_id->device);
9518 goto out_scsi_host_put;
9519 }
9520
a32c055f
WB
9521 /* set SIS 32 or SIS 64 */
9522 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82 9523 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7dd21308 9524 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
89aad428 9525 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
1be7bd82 9526
5469cb5b
BK
9527 if (ipr_transop_timeout)
9528 ioa_cfg->transop_timeout = ipr_transop_timeout;
9529 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9530 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9531 else
9532 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9533
44c10138 9534 ioa_cfg->revid = pdev->revision;
463fc696 9535
6270e593
BK
9536 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9537
1da177e4
LT
9538 ipr_regs_pci = pci_resource_start(pdev, 0);
9539
9540 rc = pci_request_regions(pdev, IPR_NAME);
9541 if (rc < 0) {
9542 dev_err(&pdev->dev,
9543 "Couldn't register memory range of registers\n");
9544 goto out_scsi_host_put;
9545 }
9546
6270e593
BK
9547 rc = pci_enable_device(pdev);
9548
9549 if (rc || pci_channel_offline(pdev)) {
9550 if (pci_channel_offline(pdev)) {
9551 ipr_wait_for_pci_err_recovery(ioa_cfg);
9552 rc = pci_enable_device(pdev);
9553 }
9554
9555 if (rc) {
9556 dev_err(&pdev->dev, "Cannot enable adapter\n");
9557 ipr_wait_for_pci_err_recovery(ioa_cfg);
9558 goto out_release_regions;
9559 }
9560 }
9561
25729a7f 9562 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
9563
9564 if (!ipr_regs) {
9565 dev_err(&pdev->dev,
9566 "Couldn't map memory range of registers\n");
9567 rc = -ENOMEM;
6270e593 9568 goto out_disable;
1da177e4
LT
9569 }
9570
9571 ioa_cfg->hdw_dma_regs = ipr_regs;
9572 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9573 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9574
6270e593 9575 ipr_init_regs(ioa_cfg);
1da177e4 9576
a32c055f 9577 if (ioa_cfg->sis64) {
869404cb 9578 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
a32c055f 9579 if (rc < 0) {
869404cb
AB
9580 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9581 rc = dma_set_mask_and_coherent(&pdev->dev,
9582 DMA_BIT_MASK(32));
a32c055f 9583 }
a32c055f 9584 } else
869404cb 9585 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
a32c055f 9586
1da177e4 9587 if (rc < 0) {
869404cb 9588 dev_err(&pdev->dev, "Failed to set DMA mask\n");
1da177e4
LT
9589 goto cleanup_nomem;
9590 }
9591
9592 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9593 ioa_cfg->chip_cfg->cache_line_size);
9594
9595 if (rc != PCIBIOS_SUCCESSFUL) {
9596 dev_err(&pdev->dev, "Write of cache line size failed\n");
6270e593 9597 ipr_wait_for_pci_err_recovery(ioa_cfg);
1da177e4
LT
9598 rc = -EIO;
9599 goto cleanup_nomem;
9600 }
9601
6270e593
BK
9602 /* Issue MMIO read to ensure card is not in EEH */
9603 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9604 ipr_wait_for_pci_err_recovery(ioa_cfg);
9605
05a6538a 9606 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9607 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9608 IPR_MAX_MSIX_VECTORS);
9609 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9610 }
9611
9612 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9613 ipr_enable_msix(ioa_cfg) == 0)
05a6538a 9614 ioa_cfg->intr_flag = IPR_USE_MSIX;
9615 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
56d6aa33 9616 ipr_enable_msi(ioa_cfg) == 0)
05a6538a 9617 ioa_cfg->intr_flag = IPR_USE_MSI;
9618 else {
9619 ioa_cfg->intr_flag = IPR_USE_LSI;
9620 ioa_cfg->nvectors = 1;
9621 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9622 }
9623
6270e593
BK
9624 pci_set_master(pdev);
9625
9626 if (pci_channel_offline(pdev)) {
9627 ipr_wait_for_pci_err_recovery(ioa_cfg);
9628 pci_set_master(pdev);
9629 if (pci_channel_offline(pdev)) {
9630 rc = -EIO;
9631 goto out_msi_disable;
9632 }
9633 }
9634
05a6538a 9635 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9636 ioa_cfg->intr_flag == IPR_USE_MSIX) {
95fecd90 9637 rc = ipr_test_msi(ioa_cfg, pdev);
05a6538a 9638 if (rc == -EOPNOTSUPP) {
6270e593 9639 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9640 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9641 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9642 pci_disable_msi(pdev);
9643 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9644 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9645 pci_disable_msix(pdev);
9646 }
9647
9648 ioa_cfg->intr_flag = IPR_USE_LSI;
9649 ioa_cfg->nvectors = 1;
9650 }
95fecd90
WB
9651 else if (rc)
9652 goto out_msi_disable;
05a6538a 9653 else {
9654 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9655 dev_info(&pdev->dev,
9656 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9657 ioa_cfg->nvectors, pdev->irq);
9658 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9659 dev_info(&pdev->dev,
9660 "Request for %d MSIXs succeeded.",
9661 ioa_cfg->nvectors);
9662 }
9663 }
9664
9665 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9666 (unsigned int)num_online_cpus(),
9667 (unsigned int)IPR_MAX_HRRQ_NUM);
95fecd90 9668
1da177e4 9669 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
f170c684 9670 goto out_msi_disable;
1da177e4
LT
9671
9672 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
f170c684 9673 goto out_msi_disable;
1da177e4
LT
9674
9675 rc = ipr_alloc_mem(ioa_cfg);
9676 if (rc < 0) {
9677 dev_err(&pdev->dev,
9678 "Couldn't allocate enough memory for device driver!\n");
f170c684 9679 goto out_msi_disable;
1da177e4
LT
9680 }
9681
6270e593
BK
9682 /* Save away PCI config space for use following IOA reset */
9683 rc = pci_save_state(pdev);
9684
9685 if (rc != PCIBIOS_SUCCESSFUL) {
9686 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9687 rc = -EIO;
9688 goto cleanup_nolog;
9689 }
9690
ce155cce
BK
9691 /*
9692 * If HRRQ updated interrupt is not masked, or reset alert is set,
9693 * the card is in an unknown state and needs a hard reset
9694 */
214777ba
WB
9695 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9696 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9697 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce
BK
9698 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9699 ioa_cfg->needs_hard_reset = 1;
5d7c20b7 9700 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
473b1e8e
BK
9701 ioa_cfg->needs_hard_reset = 1;
9702 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9703 ioa_cfg->ioa_unit_checked = 1;
ce155cce 9704
56d6aa33 9705 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9706 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
56d6aa33 9707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4 9708
05a6538a 9709 if (ioa_cfg->intr_flag == IPR_USE_MSI
9710 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9711 name_msi_vectors(ioa_cfg);
9712 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9713 0,
9714 ioa_cfg->vectors_info[0].desc,
9715 &ioa_cfg->hrrq[0]);
9716 if (!rc)
9717 rc = ipr_request_other_msi_irqs(ioa_cfg);
9718 } else {
9719 rc = request_irq(pdev->irq, ipr_isr,
9720 IRQF_SHARED,
9721 IPR_NAME, &ioa_cfg->hrrq[0]);
9722 }
1da177e4
LT
9723 if (rc) {
9724 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9725 pdev->irq, rc);
9726 goto cleanup_nolog;
9727 }
9728
463fc696
BK
9729 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9730 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9731 ioa_cfg->needs_warm_reset = 1;
9732 ioa_cfg->reset = ipr_reset_slot_reset;
9733 } else
9734 ioa_cfg->reset = ipr_reset_start_bist;
9735
feccada9 9736 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 9737 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
feccada9 9738 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
9739
9740 LEAVE;
9741out:
9742 return rc;
9743
9744cleanup_nolog:
9745 ipr_free_mem(ioa_cfg);
95fecd90 9746out_msi_disable:
6270e593 9747 ipr_wait_for_pci_err_recovery(ioa_cfg);
05a6538a 9748 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9749 pci_disable_msi(pdev);
9750 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9751 pci_disable_msix(pdev);
f170c684
JL
9752cleanup_nomem:
9753 iounmap(ipr_regs);
6270e593
BK
9754out_disable:
9755 pci_disable_device(pdev);
1da177e4
LT
9756out_release_regions:
9757 pci_release_regions(pdev);
9758out_scsi_host_put:
9759 scsi_host_put(host);
1da177e4
LT
9760 goto out;
9761}
9762
1da177e4
LT
9763/**
9764 * ipr_initiate_ioa_bringdown - Bring down an adapter
9765 * @ioa_cfg: ioa config struct
9766 * @shutdown_type: shutdown type
9767 *
9768 * Description: This function will initiate bringing down the adapter.
9769 * This consists of issuing an IOA shutdown to the adapter
9770 * to flush the cache, and running BIST.
9771 * If the caller needs to wait on the completion of the reset,
9772 * the caller must sleep on the reset_wait_q.
9773 *
9774 * Return value:
9775 * none
9776 **/
9777static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9778 enum ipr_shutdown_type shutdown_type)
9779{
9780 ENTER;
9781 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9782 ioa_cfg->sdt_state = ABORT_DUMP;
9783 ioa_cfg->reset_retries = 0;
9784 ioa_cfg->in_ioa_bringdown = 1;
9785 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9786 LEAVE;
9787}
9788
9789/**
9790 * __ipr_remove - Remove a single adapter
9791 * @pdev: pci device struct
9792 *
9793 * Adapter hot plug remove entry point.
9794 *
9795 * Return value:
9796 * none
9797 **/
9798static void __ipr_remove(struct pci_dev *pdev)
9799{
9800 unsigned long host_lock_flags = 0;
9801 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
bfae7820 9802 int i;
feccada9 9803 unsigned long driver_lock_flags;
1da177e4
LT
9804 ENTER;
9805
9806 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
203fa3fe 9807 while (ioa_cfg->in_reset_reload) {
970ea294
BK
9808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9809 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9810 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9811 }
9812
bfae7820
BK
9813 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9814 spin_lock(&ioa_cfg->hrrq[i]._lock);
9815 ioa_cfg->hrrq[i].removing_ioa = 1;
9816 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9817 }
9818 wmb();
1da177e4
LT
9819 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9820
9821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9822 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
43829731 9823 flush_work(&ioa_cfg->work_q);
9077a944 9824 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
1da177e4
LT
9825 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9826
feccada9 9827 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 9828 list_del(&ioa_cfg->queue);
feccada9 9829 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
9830
9831 if (ioa_cfg->sdt_state == ABORT_DUMP)
9832 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9834
9835 ipr_free_all_resources(ioa_cfg);
9836
9837 LEAVE;
9838}
9839
9840/**
9841 * ipr_remove - IOA hot plug remove entry point
9842 * @pdev: pci device struct
9843 *
9844 * Adapter hot plug remove entry point.
9845 *
9846 * Return value:
9847 * none
9848 **/
6f039790 9849static void ipr_remove(struct pci_dev *pdev)
1da177e4
LT
9850{
9851 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9852
9853 ENTER;
9854
ee959b00 9855 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 9856 &ipr_trace_attr);
ee959b00 9857 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9858 &ipr_dump_attr);
9859 scsi_remove_host(ioa_cfg->host);
9860
9861 __ipr_remove(pdev);
9862
9863 LEAVE;
9864}
9865
9866/**
9867 * ipr_probe - Adapter hot plug add entry point
9868 *
9869 * Return value:
9870 * 0 on success / non-zero on failure
9871 **/
6f039790 9872static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1da177e4
LT
9873{
9874 struct ipr_ioa_cfg *ioa_cfg;
b53d124a 9875 int rc, i;
1da177e4
LT
9876
9877 rc = ipr_probe_ioa(pdev, dev_id);
9878
9879 if (rc)
9880 return rc;
9881
9882 ioa_cfg = pci_get_drvdata(pdev);
9883 rc = ipr_probe_ioa_part2(ioa_cfg);
9884
9885 if (rc) {
9886 __ipr_remove(pdev);
9887 return rc;
9888 }
9889
9890 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9891
9892 if (rc) {
9893 __ipr_remove(pdev);
9894 return rc;
9895 }
9896
ee959b00 9897 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9898 &ipr_trace_attr);
9899
9900 if (rc) {
9901 scsi_remove_host(ioa_cfg->host);
9902 __ipr_remove(pdev);
9903 return rc;
9904 }
9905
ee959b00 9906 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9907 &ipr_dump_attr);
9908
9909 if (rc) {
ee959b00 9910 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
9911 &ipr_trace_attr);
9912 scsi_remove_host(ioa_cfg->host);
9913 __ipr_remove(pdev);
9914 return rc;
9915 }
9916
9917 scsi_scan_host(ioa_cfg->host);
b53d124a 9918 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9919
89f8b33c 9920 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 9921 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9922 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9923 ioa_cfg->iopoll_weight, ipr_iopoll);
9924 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9925 }
9926 }
9927
1da177e4
LT
9928 schedule_work(&ioa_cfg->work_q);
9929 return 0;
9930}
9931
9932/**
9933 * ipr_shutdown - Shutdown handler.
d18c3db5 9934 * @pdev: pci device struct
1da177e4
LT
9935 *
9936 * This function is invoked upon system shutdown/reboot. It will issue
9937 * an adapter shutdown to the adapter to flush the write cache.
9938 *
9939 * Return value:
9940 * none
9941 **/
d18c3db5 9942static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 9943{
d18c3db5 9944 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4 9945 unsigned long lock_flags = 0;
b53d124a 9946 int i;
1da177e4
LT
9947
9948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
89f8b33c 9949 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 9950 ioa_cfg->iopoll_weight = 0;
9951 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9952 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
9953 }
9954
203fa3fe 9955 while (ioa_cfg->in_reset_reload) {
970ea294
BK
9956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9957 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9958 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9959 }
9960
1da177e4
LT
9961 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9963 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9964}
9965
6f039790 9966static struct pci_device_id ipr_pci_table[] = {
1da177e4 9967 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9968 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 9969 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9970 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 9971 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9972 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 9973 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 9974 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 9975 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 9976 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 9977 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 9978 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 9979 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 9980 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 9981 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
9982 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9983 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 9984 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 9985 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 9986 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
9987 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9988 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 9989 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
9990 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9991 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 9992 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 9993 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 9994 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
9995 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9996 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 9997 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
9998 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9999 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 10000 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
10001 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10002 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
10003 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10004 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
b0f56d3d
WB
10005 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10006 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
60e7486b 10007 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 10008 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 10009 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 10010 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 10011 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 10012 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 10013 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 10014 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10015 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10016 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 10017 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10018 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10019 IPR_USE_LONG_TRANSOP_TIMEOUT },
d7b4627f
WB
10020 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10021 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10022 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10023 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10024 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10025 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
b8d5d568 10026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
5a918353
WB
10028 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10029 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
32622bde
WB
10030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10031 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
cd9b3d04 10032 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10033 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
cd9b3d04 10034 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10035 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
cd9b3d04 10036 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10037 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
cd9b3d04
WB
10038 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10039 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10040 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10041 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
b8d5d568 10042 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10043 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10044 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10045 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10046 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10047 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10048 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10049 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
43c5fdaf 10050 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10051 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10052 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
f94d9964
WX
10053 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10054 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
43c5fdaf 10055 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10056 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10057 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10058 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10059 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10060 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10061 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10062 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10063 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10064 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10065 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
5eeac3e9
WX
10066 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10067 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10068 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10069 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10070 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10071 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
1da177e4
LT
10072 { }
10073};
10074MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10075
a55b2d21 10076static const struct pci_error_handlers ipr_err_handler = {
f8a88b19 10077 .error_detected = ipr_pci_error_detected,
6270e593 10078 .mmio_enabled = ipr_pci_mmio_enabled,
f8a88b19
LV
10079 .slot_reset = ipr_pci_slot_reset,
10080};
10081
1da177e4
LT
10082static struct pci_driver ipr_driver = {
10083 .name = IPR_NAME,
10084 .id_table = ipr_pci_table,
10085 .probe = ipr_probe,
6f039790 10086 .remove = ipr_remove,
d18c3db5 10087 .shutdown = ipr_shutdown,
f8a88b19 10088 .err_handler = &ipr_err_handler,
1da177e4
LT
10089};
10090
f72919ec
WB
10091/**
10092 * ipr_halt_done - Shutdown prepare completion
10093 *
10094 * Return value:
10095 * none
10096 **/
10097static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10098{
05a6538a 10099 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
f72919ec
WB
10100}
10101
10102/**
10103 * ipr_halt - Issue shutdown prepare to all adapters
10104 *
10105 * Return value:
10106 * NOTIFY_OK on success / NOTIFY_DONE on failure
10107 **/
10108static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10109{
10110 struct ipr_cmnd *ipr_cmd;
10111 struct ipr_ioa_cfg *ioa_cfg;
feccada9 10112 unsigned long flags = 0, driver_lock_flags;
f72919ec
WB
10113
10114 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10115 return NOTIFY_DONE;
10116
feccada9 10117 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10118
10119 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10120 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
56d6aa33 10121 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
f72919ec
WB
10122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10123 continue;
10124 }
10125
10126 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10127 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10128 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10129 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10130 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10131
10132 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10134 }
feccada9 10135 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10136
10137 return NOTIFY_OK;
10138}
10139
10140static struct notifier_block ipr_notifier = {
10141 ipr_halt, NULL, 0
10142};
10143
1da177e4
LT
10144/**
10145 * ipr_init - Module entry point
10146 *
10147 * Return value:
10148 * 0 on success / negative value on failure
10149 **/
10150static int __init ipr_init(void)
10151{
10152 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10153 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10154
f72919ec 10155 register_reboot_notifier(&ipr_notifier);
dcbccbde 10156 return pci_register_driver(&ipr_driver);
1da177e4
LT
10157}
10158
10159/**
10160 * ipr_exit - Module unload
10161 *
10162 * Module unload entry point.
10163 *
10164 * Return value:
10165 * none
10166 **/
10167static void __exit ipr_exit(void)
10168{
f72919ec 10169 unregister_reboot_notifier(&ipr_notifier);
1da177e4
LT
10170 pci_unregister_driver(&ipr_driver);
10171}
10172
10173module_init(ipr_init);
10174module_exit(ipr_exit);