]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/scsi/ipr.c
iwlwifi: don't include iwl-dev.h from iwl-devtrace.h
[mirror_ubuntu-zesty-kernel.git] / drivers / scsi / ipr.c
CommitLineData
1da177e4
LT
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
1da177e4
LT
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
35a39691 73#include <linux/libata.h>
0ce3a7e5 74#include <linux/hdreg.h>
f72919ec 75#include <linux/reboot.h>
3e7ebdfa 76#include <linux/stringify.h>
1da177e4
LT
77#include <asm/io.h>
78#include <asm/irq.h>
79#include <asm/processor.h>
80#include <scsi/scsi.h>
81#include <scsi/scsi_host.h>
82#include <scsi/scsi_tcq.h>
83#include <scsi/scsi_eh.h>
84#include <scsi/scsi_cmnd.h>
1da177e4
LT
85#include "ipr.h"
86
87/*
88 * Global Data
89 */
b7d68ca3 90static LIST_HEAD(ipr_ioa_head);
1da177e4
LT
91static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
92static unsigned int ipr_max_speed = 1;
93static int ipr_testmode = 0;
94static unsigned int ipr_fastfail = 0;
5469cb5b 95static unsigned int ipr_transop_timeout = 0;
d3c74871 96static unsigned int ipr_debug = 0;
3e7ebdfa 97static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
ac09c349 98static unsigned int ipr_dual_ioa_raid = 1;
1da177e4
LT
99static DEFINE_SPINLOCK(ipr_driver_lock);
100
101/* This table describes the differences between DMA controller chips */
102static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
60e7486b 103 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
1da177e4
LT
104 .mailbox = 0x0042C,
105 .cache_line_size = 0x20,
106 {
107 .set_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_mask_reg = 0x00230,
214777ba 109 .clr_interrupt_mask_reg32 = 0x00230,
1da177e4 110 .sense_interrupt_mask_reg = 0x0022C,
214777ba 111 .sense_interrupt_mask_reg32 = 0x0022C,
1da177e4 112 .clr_interrupt_reg = 0x00228,
214777ba 113 .clr_interrupt_reg32 = 0x00228,
1da177e4 114 .sense_interrupt_reg = 0x00224,
214777ba 115 .sense_interrupt_reg32 = 0x00224,
1da177e4
LT
116 .ioarrin_reg = 0x00404,
117 .sense_uproc_interrupt_reg = 0x00214,
214777ba 118 .sense_uproc_interrupt_reg32 = 0x00214,
1da177e4 119 .set_uproc_interrupt_reg = 0x00214,
214777ba
WB
120 .set_uproc_interrupt_reg32 = 0x00214,
121 .clr_uproc_interrupt_reg = 0x00218,
122 .clr_uproc_interrupt_reg32 = 0x00218
1da177e4
LT
123 }
124 },
125 { /* Snipe and Scamp */
126 .mailbox = 0x0052C,
127 .cache_line_size = 0x20,
128 {
129 .set_interrupt_mask_reg = 0x00288,
130 .clr_interrupt_mask_reg = 0x0028C,
214777ba 131 .clr_interrupt_mask_reg32 = 0x0028C,
1da177e4 132 .sense_interrupt_mask_reg = 0x00288,
214777ba 133 .sense_interrupt_mask_reg32 = 0x00288,
1da177e4 134 .clr_interrupt_reg = 0x00284,
214777ba 135 .clr_interrupt_reg32 = 0x00284,
1da177e4 136 .sense_interrupt_reg = 0x00280,
214777ba 137 .sense_interrupt_reg32 = 0x00280,
1da177e4
LT
138 .ioarrin_reg = 0x00504,
139 .sense_uproc_interrupt_reg = 0x00290,
214777ba 140 .sense_uproc_interrupt_reg32 = 0x00290,
1da177e4 141 .set_uproc_interrupt_reg = 0x00290,
214777ba
WB
142 .set_uproc_interrupt_reg32 = 0x00290,
143 .clr_uproc_interrupt_reg = 0x00294,
144 .clr_uproc_interrupt_reg32 = 0x00294
1da177e4
LT
145 }
146 },
a74c1639
WB
147 { /* CRoC */
148 .mailbox = 0x00040,
149 .cache_line_size = 0x20,
150 {
151 .set_interrupt_mask_reg = 0x00010,
152 .clr_interrupt_mask_reg = 0x00018,
214777ba 153 .clr_interrupt_mask_reg32 = 0x0001C,
a74c1639 154 .sense_interrupt_mask_reg = 0x00010,
214777ba 155 .sense_interrupt_mask_reg32 = 0x00014,
a74c1639 156 .clr_interrupt_reg = 0x00008,
214777ba 157 .clr_interrupt_reg32 = 0x0000C,
a74c1639 158 .sense_interrupt_reg = 0x00000,
214777ba 159 .sense_interrupt_reg32 = 0x00004,
a74c1639
WB
160 .ioarrin_reg = 0x00070,
161 .sense_uproc_interrupt_reg = 0x00020,
214777ba 162 .sense_uproc_interrupt_reg32 = 0x00024,
a74c1639 163 .set_uproc_interrupt_reg = 0x00020,
214777ba 164 .set_uproc_interrupt_reg32 = 0x00024,
dcbad00e 165 .clr_uproc_interrupt_reg = 0x00028,
214777ba
WB
166 .clr_uproc_interrupt_reg32 = 0x0002C,
167 .init_feedback_reg = 0x0005C,
dcbad00e
WB
168 .dump_addr_reg = 0x00064,
169 .dump_data_reg = 0x00068
a74c1639
WB
170 }
171 },
1da177e4
LT
172};
173
174static const struct ipr_chip_t ipr_chip[] = {
a32c055f
WB
175 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
176 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
177 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
178 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
d7b4627f
WB
181 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
182 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
1da177e4
LT
184};
185
186static int ipr_max_bus_speeds [] = {
187 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
188};
189
190MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
191MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
192module_param_named(max_speed, ipr_max_speed, uint, 0);
193MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
194module_param_named(log_level, ipr_log_level, uint, 0);
195MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
196module_param_named(testmode, ipr_testmode, int, 0);
197MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
2cf22be0 198module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
1da177e4
LT
199MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
200module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
201MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
2cf22be0 202module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
d3c74871 203MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
ac09c349
BK
204module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
205MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
3e7ebdfa
WB
206module_param_named(max_devs, ipr_max_devs, int, 0);
207MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
208 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
1da177e4
LT
209MODULE_LICENSE("GPL");
210MODULE_VERSION(IPR_DRIVER_VERSION);
211
1da177e4
LT
212/* A constant array of IOASCs/URCs/Error Messages */
213static const
214struct ipr_error_table_t ipr_error_table[] = {
933916f3 215 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
216 "8155: An unknown error was received"},
217 {0x00330000, 0, 0,
218 "Soft underlength error"},
219 {0x005A0000, 0, 0,
220 "Command to be cancelled not found"},
221 {0x00808000, 0, 0,
222 "Qualified success"},
933916f3 223 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 224 "FFFE: Soft device bus error recovered by the IOA"},
933916f3 225 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 226 "4101: Soft device bus fabric error"},
5aa3a333
WB
227 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
228 "FFFC: Logical block guard error recovered by the device"},
229 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
230 "FFFC: Logical block reference tag error recovered by the device"},
231 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
232 "4171: Recovered scatter list tag / sequence number error"},
233 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
234 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
235 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
236 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
237 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
238 "FFFD: Recovered logical block reference tag error detected by the IOA"},
239 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
240 "FFFD: Logical block guard error recovered by the IOA"},
933916f3 241 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 242 "FFF9: Device sector reassign successful"},
933916f3 243 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 244 "FFF7: Media error recovered by device rewrite procedures"},
933916f3 245 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 246 "7001: IOA sector reassignment successful"},
933916f3 247 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 248 "FFF9: Soft media error. Sector reassignment recommended"},
933916f3 249 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 250 "FFF7: Media error recovered by IOA rewrite procedures"},
933916f3 251 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 252 "FF3D: Soft PCI bus error recovered by the IOA"},
933916f3 253 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 254 "FFF6: Device hardware error recovered by the IOA"},
933916f3 255 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 256 "FFF6: Device hardware error recovered by the device"},
933916f3 257 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 258 "FF3D: Soft IOA error recovered by the IOA"},
933916f3 259 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 260 "FFFA: Undefined device response recovered by the IOA"},
933916f3 261 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 262 "FFF6: Device bus error, message or command phase"},
933916f3 263 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
35a39691 264 "FFFE: Task Management Function failed"},
933916f3 265 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 266 "FFF6: Failure prediction threshold exceeded"},
933916f3 267 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
268 "8009: Impending cache battery pack failure"},
269 {0x02040400, 0, 0,
270 "34FF: Disk device format in progress"},
65f56475
BK
271 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
272 "9070: IOA requested reset"},
1da177e4
LT
273 {0x023F0000, 0, 0,
274 "Synchronization required"},
275 {0x024E0000, 0, 0,
276 "No ready, IOA shutdown"},
277 {0x025A0000, 0, 0,
278 "Not ready, IOA has been shutdown"},
933916f3 279 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
280 "3020: Storage subsystem configuration error"},
281 {0x03110B00, 0, 0,
282 "FFF5: Medium error, data unreadable, recommend reassign"},
283 {0x03110C00, 0, 0,
284 "7000: Medium error, data unreadable, do not reassign"},
933916f3 285 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 286 "FFF3: Disk media format bad"},
933916f3 287 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 288 "3002: Addressed device failed to respond to selection"},
933916f3 289 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 290 "3100: Device bus error"},
933916f3 291 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
292 "3109: IOA timed out a device command"},
293 {0x04088000, 0, 0,
294 "3120: SCSI bus is not operational"},
933916f3 295 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 296 "4100: Hard device bus fabric error"},
5aa3a333
WB
297 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
298 "310C: Logical block guard error detected by the device"},
299 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
300 "310C: Logical block reference tag error detected by the device"},
301 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
302 "4170: Scatter list tag / sequence number error"},
303 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
304 "8150: Logical block CRC error on IOA to Host transfer"},
305 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
306 "4170: Logical block sequence number error on IOA to Host transfer"},
307 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
308 "310D: Logical block reference tag error detected by the IOA"},
309 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
310 "310D: Logical block guard error detected by the IOA"},
933916f3 311 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 312 "9000: IOA reserved area data check"},
933916f3 313 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 314 "9001: IOA reserved area invalid data pattern"},
933916f3 315 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 316 "9002: IOA reserved area LRC error"},
5aa3a333
WB
317 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
318 "Hardware Error, IOA metadata access error"},
933916f3 319 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 320 "102E: Out of alternate sectors for disk storage"},
933916f3 321 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 322 "FFF4: Data transfer underlength error"},
933916f3 323 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 324 "FFF4: Data transfer overlength error"},
933916f3 325 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 326 "3400: Logical unit failure"},
933916f3 327 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 328 "FFF4: Device microcode is corrupt"},
933916f3 329 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
330 "8150: PCI bus error"},
331 {0x04430000, 1, 0,
332 "Unsupported device bus message received"},
933916f3 333 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 334 "FFF4: Disk device problem"},
933916f3 335 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 336 "8150: Permanent IOA failure"},
933916f3 337 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 338 "3010: Disk device returned wrong response to IOA"},
933916f3 339 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
340 "8151: IOA microcode error"},
341 {0x04448500, 0, 0,
342 "Device bus status error"},
933916f3 343 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 344 "8157: IOA error requiring IOA reset to recover"},
35a39691
BK
345 {0x04448700, 0, 0,
346 "ATA device status error"},
1da177e4
LT
347 {0x04490000, 0, 0,
348 "Message reject received from the device"},
933916f3 349 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 350 "8008: A permanent cache battery pack failure occurred"},
933916f3 351 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 352 "9090: Disk unit has been modified after the last known status"},
933916f3 353 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 354 "9081: IOA detected device error"},
933916f3 355 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 356 "9082: IOA detected device error"},
933916f3 357 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 358 "3110: Device bus error, message or command phase"},
933916f3 359 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
35a39691 360 "3110: SAS Command / Task Management Function failed"},
933916f3 361 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 362 "9091: Incorrect hardware configuration change has been detected"},
933916f3 363 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 364 "9073: Invalid multi-adapter configuration"},
933916f3 365 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 366 "4010: Incorrect connection between cascaded expanders"},
933916f3 367 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 368 "4020: Connections exceed IOA design limits"},
933916f3 369 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 370 "4030: Incorrect multipath connection"},
933916f3 371 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 372 "4110: Unsupported enclosure function"},
933916f3 373 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
374 "FFF4: Command to logical unit failed"},
375 {0x05240000, 1, 0,
376 "Illegal request, invalid request type or request packet"},
377 {0x05250000, 0, 0,
378 "Illegal request, invalid resource handle"},
b0df54bb
BK
379 {0x05258000, 0, 0,
380 "Illegal request, commands not allowed to this device"},
381 {0x05258100, 0, 0,
382 "Illegal request, command not allowed to a secondary adapter"},
5aa3a333
WB
383 {0x05258200, 0, 0,
384 "Illegal request, command not allowed to a non-optimized resource"},
1da177e4
LT
385 {0x05260000, 0, 0,
386 "Illegal request, invalid field in parameter list"},
387 {0x05260100, 0, 0,
388 "Illegal request, parameter not supported"},
389 {0x05260200, 0, 0,
390 "Illegal request, parameter value invalid"},
391 {0x052C0000, 0, 0,
392 "Illegal request, command sequence error"},
b0df54bb
BK
393 {0x052C8000, 1, 0,
394 "Illegal request, dual adapter support not enabled"},
933916f3 395 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 396 "9031: Array protection temporarily suspended, protection resuming"},
933916f3 397 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 398 "9040: Array protection temporarily suspended, protection resuming"},
933916f3 399 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 400 "3140: Device bus not ready to ready transition"},
933916f3 401 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
402 "FFFB: SCSI bus was reset"},
403 {0x06290500, 0, 0,
404 "FFFE: SCSI bus transition to single ended"},
405 {0x06290600, 0, 0,
406 "FFFE: SCSI bus transition to LVD"},
933916f3 407 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 408 "FFFB: SCSI bus was reset by another initiator"},
933916f3 409 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 410 "3029: A device replacement has occurred"},
933916f3 411 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 412 "9051: IOA cache data exists for a missing or failed device"},
933916f3 413 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 414 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
933916f3 415 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 416 "9025: Disk unit is not supported at its physical location"},
933916f3 417 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 418 "3020: IOA detected a SCSI bus configuration error"},
933916f3 419 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 420 "3150: SCSI bus configuration error"},
933916f3 421 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 422 "9074: Asymmetric advanced function disk configuration"},
933916f3 423 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 424 "4040: Incomplete multipath connection between IOA and enclosure"},
933916f3 425 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 426 "4041: Incomplete multipath connection between enclosure and device"},
933916f3 427 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 428 "9075: Incomplete multipath connection between IOA and remote IOA"},
933916f3 429 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 430 "9076: Configuration error, missing remote IOA"},
933916f3 431 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 432 "4050: Enclosure does not support a required multipath function"},
b75424fc
WB
433 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
434 "4070: Logically bad block written on device"},
933916f3 435 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 436 "9041: Array protection temporarily suspended"},
933916f3 437 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 438 "9042: Corrupt array parity detected on specified device"},
933916f3 439 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 440 "9030: Array no longer protected due to missing or failed disk unit"},
933916f3 441 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 442 "9071: Link operational transition"},
933916f3 443 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 444 "9072: Link not operational transition"},
933916f3 445 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 446 "9032: Array exposed but still protected"},
e435340c
BK
447 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
448 "70DD: Device forced failed by disrupt device command"},
933916f3 449 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 450 "4061: Multipath redundancy level got better"},
933916f3 451 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 452 "4060: Multipath redundancy level got worse"},
1da177e4
LT
453 {0x07270000, 0, 0,
454 "Failure due to other device"},
933916f3 455 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 456 "9008: IOA does not support functions expected by devices"},
933916f3 457 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 458 "9010: Cache data associated with attached devices cannot be found"},
933916f3 459 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 460 "9011: Cache data belongs to devices other than those attached"},
933916f3 461 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 462 "9020: Array missing 2 or more devices with only 1 device present"},
933916f3 463 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 464 "9021: Array missing 2 or more devices with 2 or more devices present"},
933916f3 465 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 466 "9022: Exposed array is missing a required device"},
933916f3 467 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 468 "9023: Array member(s) not at required physical locations"},
933916f3 469 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 470 "9024: Array not functional due to present hardware configuration"},
933916f3 471 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 472 "9026: Array not functional due to present hardware configuration"},
933916f3 473 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 474 "9027: Array is missing a device and parity is out of sync"},
933916f3 475 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 476 "9028: Maximum number of arrays already exist"},
933916f3 477 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 478 "9050: Required cache data cannot be located for a disk unit"},
933916f3 479 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 480 "9052: Cache data exists for a device that has been modified"},
933916f3 481 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 482 "9054: IOA resources not available due to previous problems"},
933916f3 483 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 484 "9092: Disk unit requires initialization before use"},
933916f3 485 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 486 "9029: Incorrect hardware configuration change has been detected"},
933916f3 487 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 488 "9060: One or more disk pairs are missing from an array"},
933916f3 489 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 490 "9061: One or more disks are missing from an array"},
933916f3 491 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 492 "9062: One or more disks are missing from an array"},
933916f3 493 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
494 "9063: Maximum number of functional arrays has been exceeded"},
495 {0x0B260000, 0, 0,
496 "Aborted command, invalid descriptor"},
497 {0x0B5A0000, 0, 0,
498 "Command terminated by host"}
499};
500
501static const struct ipr_ses_table_entry ipr_ses_table[] = {
502 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
503 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
504 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
505 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
506 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
507 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
508 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
509 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
510 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
511 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
512 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
513 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
514 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
515};
516
517/*
518 * Function Prototypes
519 */
520static int ipr_reset_alert(struct ipr_cmnd *);
521static void ipr_process_ccn(struct ipr_cmnd *);
522static void ipr_process_error(struct ipr_cmnd *);
523static void ipr_reset_ioa_job(struct ipr_cmnd *);
524static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
525 enum ipr_shutdown_type);
526
527#ifdef CONFIG_SCSI_IPR_TRACE
528/**
529 * ipr_trc_hook - Add a trace entry to the driver trace
530 * @ipr_cmd: ipr command struct
531 * @type: trace type
532 * @add_data: additional data
533 *
534 * Return value:
535 * none
536 **/
537static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
538 u8 type, u32 add_data)
539{
540 struct ipr_trace_entry *trace_entry;
541 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
542
543 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
544 trace_entry->time = jiffies;
545 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
546 trace_entry->type = type;
a32c055f
WB
547 if (ipr_cmd->ioa_cfg->sis64)
548 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
549 else
550 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
35a39691 551 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
1da177e4
LT
552 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
553 trace_entry->u.add_data = add_data;
554}
555#else
556#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
557#endif
558
559/**
560 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
561 * @ipr_cmd: ipr command struct
562 *
563 * Return value:
564 * none
565 **/
566static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
567{
568 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
569 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
a32c055f 570 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
571
572 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 573 ioarcb->data_transfer_length = 0;
1da177e4 574 ioarcb->read_data_transfer_length = 0;
a32c055f 575 ioarcb->ioadl_len = 0;
1da177e4 576 ioarcb->read_ioadl_len = 0;
a32c055f
WB
577
578 if (ipr_cmd->ioa_cfg->sis64)
579 ioarcb->u.sis64_addr_data.data_ioadl_addr =
580 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
581 else {
582 ioarcb->write_ioadl_addr =
583 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
584 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
585 }
586
1da177e4
LT
587 ioasa->ioasc = 0;
588 ioasa->residual_data_len = 0;
35a39691 589 ioasa->u.gata.status = 0;
1da177e4
LT
590
591 ipr_cmd->scsi_cmd = NULL;
35a39691 592 ipr_cmd->qc = NULL;
1da177e4
LT
593 ipr_cmd->sense_buffer[0] = 0;
594 ipr_cmd->dma_use_sg = 0;
595}
596
597/**
598 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
599 * @ipr_cmd: ipr command struct
600 *
601 * Return value:
602 * none
603 **/
604static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
605{
606 ipr_reinit_ipr_cmnd(ipr_cmd);
607 ipr_cmd->u.scratch = 0;
608 ipr_cmd->sibling = NULL;
609 init_timer(&ipr_cmd->timer);
610}
611
612/**
613 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
614 * @ioa_cfg: ioa config struct
615 *
616 * Return value:
617 * pointer to ipr command struct
618 **/
619static
620struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
621{
622 struct ipr_cmnd *ipr_cmd;
623
624 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
625 list_del(&ipr_cmd->queue);
626 ipr_init_ipr_cmnd(ipr_cmd);
627
628 return ipr_cmd;
629}
630
1da177e4
LT
631/**
632 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
633 * @ioa_cfg: ioa config struct
634 * @clr_ints: interrupts to clear
635 *
636 * This function masks all interrupts on the adapter, then clears the
637 * interrupts specified in the mask
638 *
639 * Return value:
640 * none
641 **/
642static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
643 u32 clr_ints)
644{
645 volatile u32 int_reg;
646
647 /* Stop new interrupts */
648 ioa_cfg->allow_interrupts = 0;
649
650 /* Set interrupt mask to stop all new interrupts */
214777ba
WB
651 if (ioa_cfg->sis64)
652 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
653 else
654 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
1da177e4
LT
655
656 /* Clear any pending interrupts */
214777ba
WB
657 if (ioa_cfg->sis64)
658 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
659 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
1da177e4
LT
660 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
661}
662
663/**
664 * ipr_save_pcix_cmd_reg - Save PCI-X command register
665 * @ioa_cfg: ioa config struct
666 *
667 * Return value:
668 * 0 on success / -EIO on failure
669 **/
670static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
671{
672 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
673
7dce0e1c
BK
674 if (pcix_cmd_reg == 0)
675 return 0;
1da177e4
LT
676
677 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
678 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
679 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
680 return -EIO;
681 }
682
683 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
684 return 0;
685}
686
687/**
688 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
689 * @ioa_cfg: ioa config struct
690 *
691 * Return value:
692 * 0 on success / -EIO on failure
693 **/
694static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
695{
696 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
697
698 if (pcix_cmd_reg) {
699 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
700 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
701 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
702 return -EIO;
703 }
1da177e4
LT
704 }
705
706 return 0;
707}
708
35a39691
BK
709/**
710 * ipr_sata_eh_done - done function for aborted SATA commands
711 * @ipr_cmd: ipr command struct
712 *
713 * This function is invoked for ops generated to SATA
714 * devices which are being aborted.
715 *
716 * Return value:
717 * none
718 **/
719static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
720{
721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
722 struct ata_queued_cmd *qc = ipr_cmd->qc;
723 struct ipr_sata_port *sata_port = qc->ap->private_data;
724
725 qc->err_mask |= AC_ERR_OTHER;
726 sata_port->ioasa.status |= ATA_BUSY;
727 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
728 ata_qc_complete(qc);
729}
730
1da177e4
LT
731/**
732 * ipr_scsi_eh_done - mid-layer done function for aborted ops
733 * @ipr_cmd: ipr command struct
734 *
735 * This function is invoked by the interrupt handler for
736 * ops generated by the SCSI mid-layer which are being aborted.
737 *
738 * Return value:
739 * none
740 **/
741static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
742{
743 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
744 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
745
746 scsi_cmd->result |= (DID_ERROR << 16);
747
63015bc9 748 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
749 scsi_cmd->scsi_done(scsi_cmd);
750 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
751}
752
753/**
754 * ipr_fail_all_ops - Fails all outstanding ops.
755 * @ioa_cfg: ioa config struct
756 *
757 * This function fails all outstanding ops.
758 *
759 * Return value:
760 * none
761 **/
762static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
763{
764 struct ipr_cmnd *ipr_cmd, *temp;
765
766 ENTER;
767 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
768 list_del(&ipr_cmd->queue);
769
770 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
771 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
772
773 if (ipr_cmd->scsi_cmd)
774 ipr_cmd->done = ipr_scsi_eh_done;
35a39691
BK
775 else if (ipr_cmd->qc)
776 ipr_cmd->done = ipr_sata_eh_done;
1da177e4
LT
777
778 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
779 del_timer(&ipr_cmd->timer);
780 ipr_cmd->done(ipr_cmd);
781 }
782
783 LEAVE;
784}
785
a32c055f
WB
786/**
787 * ipr_send_command - Send driver initiated requests.
788 * @ipr_cmd: ipr command struct
789 *
790 * This function sends a command to the adapter using the correct write call.
791 * In the case of sis64, calculate the ioarcb size required. Then or in the
792 * appropriate bits.
793 *
794 * Return value:
795 * none
796 **/
797static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
798{
799 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
800 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
801
802 if (ioa_cfg->sis64) {
803 /* The default size is 256 bytes */
804 send_dma_addr |= 0x1;
805
806 /* If the number of ioadls * size of ioadl > 128 bytes,
807 then use a 512 byte ioarcb */
808 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
809 send_dma_addr |= 0x4;
810 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
811 } else
812 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
813}
814
1da177e4
LT
815/**
816 * ipr_do_req - Send driver initiated requests.
817 * @ipr_cmd: ipr command struct
818 * @done: done function
819 * @timeout_func: timeout function
820 * @timeout: timeout value
821 *
822 * This function sends the specified command to the adapter with the
823 * timeout given. The done function is invoked on command completion.
824 *
825 * Return value:
826 * none
827 **/
828static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
829 void (*done) (struct ipr_cmnd *),
830 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
831{
832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
833
834 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
835
836 ipr_cmd->done = done;
837
838 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
839 ipr_cmd->timer.expires = jiffies + timeout;
840 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
841
842 add_timer(&ipr_cmd->timer);
843
844 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
845
846 mb();
a32c055f
WB
847
848 ipr_send_command(ipr_cmd);
1da177e4
LT
849}
850
851/**
852 * ipr_internal_cmd_done - Op done function for an internally generated op.
853 * @ipr_cmd: ipr command struct
854 *
855 * This function is the op done function for an internally generated,
856 * blocking op. It simply wakes the sleeping thread.
857 *
858 * Return value:
859 * none
860 **/
861static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
862{
863 if (ipr_cmd->sibling)
864 ipr_cmd->sibling = NULL;
865 else
866 complete(&ipr_cmd->completion);
867}
868
a32c055f
WB
869/**
870 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
871 * @ipr_cmd: ipr command struct
872 * @dma_addr: dma address
873 * @len: transfer length
874 * @flags: ioadl flag value
875 *
876 * This function initializes an ioadl in the case where there is only a single
877 * descriptor.
878 *
879 * Return value:
880 * nothing
881 **/
882static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
883 u32 len, int flags)
884{
885 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
886 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
887
888 ipr_cmd->dma_use_sg = 1;
889
890 if (ipr_cmd->ioa_cfg->sis64) {
891 ioadl64->flags = cpu_to_be32(flags);
892 ioadl64->data_len = cpu_to_be32(len);
893 ioadl64->address = cpu_to_be64(dma_addr);
894
895 ipr_cmd->ioarcb.ioadl_len =
896 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
897 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
898 } else {
899 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
900 ioadl->address = cpu_to_be32(dma_addr);
901
902 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
903 ipr_cmd->ioarcb.read_ioadl_len =
904 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
905 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
906 } else {
907 ipr_cmd->ioarcb.ioadl_len =
908 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
909 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
910 }
911 }
912}
913
1da177e4
LT
914/**
915 * ipr_send_blocking_cmd - Send command and sleep on its completion.
916 * @ipr_cmd: ipr command struct
917 * @timeout_func: function to invoke if command times out
918 * @timeout: timeout
919 *
920 * Return value:
921 * none
922 **/
923static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
924 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
925 u32 timeout)
926{
927 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
928
929 init_completion(&ipr_cmd->completion);
930 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
931
932 spin_unlock_irq(ioa_cfg->host->host_lock);
933 wait_for_completion(&ipr_cmd->completion);
934 spin_lock_irq(ioa_cfg->host->host_lock);
935}
936
937/**
938 * ipr_send_hcam - Send an HCAM to the adapter.
939 * @ioa_cfg: ioa config struct
940 * @type: HCAM type
941 * @hostrcb: hostrcb struct
942 *
943 * This function will send a Host Controlled Async command to the adapter.
944 * If HCAMs are currently not allowed to be issued to the adapter, it will
945 * place the hostrcb on the free queue.
946 *
947 * Return value:
948 * none
949 **/
950static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
951 struct ipr_hostrcb *hostrcb)
952{
953 struct ipr_cmnd *ipr_cmd;
954 struct ipr_ioarcb *ioarcb;
955
956 if (ioa_cfg->allow_cmds) {
957 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
958 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
959 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
960
961 ipr_cmd->u.hostrcb = hostrcb;
962 ioarcb = &ipr_cmd->ioarcb;
963
964 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
965 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
966 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
967 ioarcb->cmd_pkt.cdb[1] = type;
968 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
969 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
970
a32c055f
WB
971 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
972 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
973
974 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
975 ipr_cmd->done = ipr_process_ccn;
976 else
977 ipr_cmd->done = ipr_process_error;
978
979 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
980
981 mb();
a32c055f
WB
982
983 ipr_send_command(ipr_cmd);
1da177e4
LT
984 } else {
985 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
986 }
987}
988
3e7ebdfa
WB
989/**
990 * ipr_update_ata_class - Update the ata class in the resource entry
991 * @res: resource entry struct
992 * @proto: cfgte device bus protocol value
993 *
994 * Return value:
995 * none
996 **/
997static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
998{
999 switch(proto) {
1000 case IPR_PROTO_SATA:
1001 case IPR_PROTO_SAS_STP:
1002 res->ata_class = ATA_DEV_ATA;
1003 break;
1004 case IPR_PROTO_SATA_ATAPI:
1005 case IPR_PROTO_SAS_STP_ATAPI:
1006 res->ata_class = ATA_DEV_ATAPI;
1007 break;
1008 default:
1009 res->ata_class = ATA_DEV_UNKNOWN;
1010 break;
1011 };
1012}
1013
1da177e4
LT
1014/**
1015 * ipr_init_res_entry - Initialize a resource entry struct.
1016 * @res: resource entry struct
3e7ebdfa 1017 * @cfgtew: config table entry wrapper struct
1da177e4
LT
1018 *
1019 * Return value:
1020 * none
1021 **/
3e7ebdfa
WB
1022static void ipr_init_res_entry(struct ipr_resource_entry *res,
1023 struct ipr_config_table_entry_wrapper *cfgtew)
1da177e4 1024{
3e7ebdfa
WB
1025 int found = 0;
1026 unsigned int proto;
1027 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1028 struct ipr_resource_entry *gscsi_res = NULL;
1029
ee0a90fa 1030 res->needs_sync_complete = 0;
1da177e4
LT
1031 res->in_erp = 0;
1032 res->add_to_ml = 0;
1033 res->del_from_ml = 0;
1034 res->resetting_device = 0;
1035 res->sdev = NULL;
35a39691 1036 res->sata_port = NULL;
3e7ebdfa
WB
1037
1038 if (ioa_cfg->sis64) {
1039 proto = cfgtew->u.cfgte64->proto;
1040 res->res_flags = cfgtew->u.cfgte64->res_flags;
1041 res->qmodel = IPR_QUEUEING_MODEL64(res);
1042 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1043
1044 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1045 sizeof(res->res_path));
1046
1047 res->bus = 0;
1048 res->lun = scsilun_to_int(&res->dev_lun);
1049
1050 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1051 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1052 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1053 found = 1;
1054 res->target = gscsi_res->target;
1055 break;
1056 }
1057 }
1058 if (!found) {
1059 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1060 ioa_cfg->max_devs_supported);
1061 set_bit(res->target, ioa_cfg->target_ids);
1062 }
1063
1064 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1065 sizeof(res->dev_lun.scsi_lun));
1066 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1067 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1068 res->target = 0;
1069 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1070 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1071 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1072 ioa_cfg->max_devs_supported);
1073 set_bit(res->target, ioa_cfg->array_ids);
1074 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1075 res->bus = IPR_VSET_VIRTUAL_BUS;
1076 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1077 ioa_cfg->max_devs_supported);
1078 set_bit(res->target, ioa_cfg->vset_ids);
1079 } else {
1080 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1081 ioa_cfg->max_devs_supported);
1082 set_bit(res->target, ioa_cfg->target_ids);
1083 }
1084 } else {
1085 proto = cfgtew->u.cfgte->proto;
1086 res->qmodel = IPR_QUEUEING_MODEL(res);
1087 res->flags = cfgtew->u.cfgte->flags;
1088 if (res->flags & IPR_IS_IOA_RESOURCE)
1089 res->type = IPR_RES_TYPE_IOAFP;
1090 else
1091 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1092
1093 res->bus = cfgtew->u.cfgte->res_addr.bus;
1094 res->target = cfgtew->u.cfgte->res_addr.target;
1095 res->lun = cfgtew->u.cfgte->res_addr.lun;
1096 }
1097
1098 ipr_update_ata_class(res, proto);
1099}
1100
1101/**
1102 * ipr_is_same_device - Determine if two devices are the same.
1103 * @res: resource entry struct
1104 * @cfgtew: config table entry wrapper struct
1105 *
1106 * Return value:
1107 * 1 if the devices are the same / 0 otherwise
1108 **/
1109static int ipr_is_same_device(struct ipr_resource_entry *res,
1110 struct ipr_config_table_entry_wrapper *cfgtew)
1111{
1112 if (res->ioa_cfg->sis64) {
1113 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1114 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1115 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1116 sizeof(cfgtew->u.cfgte64->lun))) {
1117 return 1;
1118 }
1119 } else {
1120 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1121 res->target == cfgtew->u.cfgte->res_addr.target &&
1122 res->lun == cfgtew->u.cfgte->res_addr.lun)
1123 return 1;
1124 }
1125
1126 return 0;
1127}
1128
1129/**
1130 * ipr_format_resource_path - Format the resource path for printing.
1131 * @res_path: resource path
1132 * @buf: buffer
1133 *
1134 * Return value:
1135 * pointer to buffer
1136 **/
1137static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1138{
1139 int i;
1140
1141 sprintf(buffer, "%02X", res_path[0]);
1142 for (i=1; res_path[i] != 0xff; i++)
4565e370 1143 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
3e7ebdfa
WB
1144
1145 return buffer;
1146}
1147
1148/**
1149 * ipr_update_res_entry - Update the resource entry.
1150 * @res: resource entry struct
1151 * @cfgtew: config table entry wrapper struct
1152 *
1153 * Return value:
1154 * none
1155 **/
1156static void ipr_update_res_entry(struct ipr_resource_entry *res,
1157 struct ipr_config_table_entry_wrapper *cfgtew)
1158{
1159 char buffer[IPR_MAX_RES_PATH_LENGTH];
1160 unsigned int proto;
1161 int new_path = 0;
1162
1163 if (res->ioa_cfg->sis64) {
1164 res->flags = cfgtew->u.cfgte64->flags;
1165 res->res_flags = cfgtew->u.cfgte64->res_flags;
1166 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1167
1168 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1169 sizeof(struct ipr_std_inq_data));
1170
1171 res->qmodel = IPR_QUEUEING_MODEL64(res);
1172 proto = cfgtew->u.cfgte64->proto;
1173 res->res_handle = cfgtew->u.cfgte64->res_handle;
1174 res->dev_id = cfgtew->u.cfgte64->dev_id;
1175
1176 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1177 sizeof(res->dev_lun.scsi_lun));
1178
1179 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1180 sizeof(res->res_path))) {
1181 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1182 sizeof(res->res_path));
1183 new_path = 1;
1184 }
1185
1186 if (res->sdev && new_path)
1187 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1188 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1189 } else {
1190 res->flags = cfgtew->u.cfgte->flags;
1191 if (res->flags & IPR_IS_IOA_RESOURCE)
1192 res->type = IPR_RES_TYPE_IOAFP;
1193 else
1194 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1195
1196 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1197 sizeof(struct ipr_std_inq_data));
1198
1199 res->qmodel = IPR_QUEUEING_MODEL(res);
1200 proto = cfgtew->u.cfgte->proto;
1201 res->res_handle = cfgtew->u.cfgte->res_handle;
1202 }
1203
1204 ipr_update_ata_class(res, proto);
1205}
1206
1207/**
1208 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1209 * for the resource.
1210 * @res: resource entry struct
1211 * @cfgtew: config table entry wrapper struct
1212 *
1213 * Return value:
1214 * none
1215 **/
1216static void ipr_clear_res_target(struct ipr_resource_entry *res)
1217{
1218 struct ipr_resource_entry *gscsi_res = NULL;
1219 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1220
1221 if (!ioa_cfg->sis64)
1222 return;
1223
1224 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1225 clear_bit(res->target, ioa_cfg->array_ids);
1226 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1227 clear_bit(res->target, ioa_cfg->vset_ids);
1228 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1229 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1230 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1231 return;
1232 clear_bit(res->target, ioa_cfg->target_ids);
1233
1234 } else if (res->bus == 0)
1235 clear_bit(res->target, ioa_cfg->target_ids);
1da177e4
LT
1236}
1237
1238/**
1239 * ipr_handle_config_change - Handle a config change from the adapter
1240 * @ioa_cfg: ioa config struct
1241 * @hostrcb: hostrcb
1242 *
1243 * Return value:
1244 * none
1245 **/
1246static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1247 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1248{
1249 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1250 struct ipr_config_table_entry_wrapper cfgtew;
1251 __be32 cc_res_handle;
1252
1da177e4
LT
1253 u32 is_ndn = 1;
1254
3e7ebdfa
WB
1255 if (ioa_cfg->sis64) {
1256 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1257 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1258 } else {
1259 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1260 cc_res_handle = cfgtew.u.cfgte->res_handle;
1261 }
1da177e4
LT
1262
1263 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1264 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1265 is_ndn = 0;
1266 break;
1267 }
1268 }
1269
1270 if (is_ndn) {
1271 if (list_empty(&ioa_cfg->free_res_q)) {
1272 ipr_send_hcam(ioa_cfg,
1273 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1274 hostrcb);
1275 return;
1276 }
1277
1278 res = list_entry(ioa_cfg->free_res_q.next,
1279 struct ipr_resource_entry, queue);
1280
1281 list_del(&res->queue);
3e7ebdfa 1282 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1283 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1284 }
1285
3e7ebdfa 1286 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1287
1288 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1289 if (res->sdev) {
1da177e4 1290 res->del_from_ml = 1;
3e7ebdfa 1291 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4
LT
1292 if (ioa_cfg->allow_ml_add_del)
1293 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1294 } else {
1295 ipr_clear_res_target(res);
1da177e4 1296 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1297 }
1da177e4
LT
1298 } else if (!res->sdev) {
1299 res->add_to_ml = 1;
1300 if (ioa_cfg->allow_ml_add_del)
1301 schedule_work(&ioa_cfg->work_q);
1302 }
1303
1304 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1305}
1306
1307/**
1308 * ipr_process_ccn - Op done function for a CCN.
1309 * @ipr_cmd: ipr command struct
1310 *
1311 * This function is the op done function for a configuration
1312 * change notification host controlled async from the adapter.
1313 *
1314 * Return value:
1315 * none
1316 **/
1317static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1318{
1319 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1320 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1321 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1322
1323 list_del(&hostrcb->queue);
1324 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1325
1326 if (ioasc) {
1327 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1328 dev_err(&ioa_cfg->pdev->dev,
1329 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1330
1331 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1332 } else {
1333 ipr_handle_config_change(ioa_cfg, hostrcb);
1334 }
1335}
1336
8cf093e2
BK
1337/**
1338 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1339 * @i: index into buffer
1340 * @buf: string to modify
1341 *
1342 * This function will strip all trailing whitespace, pad the end
1343 * of the string with a single space, and NULL terminate the string.
1344 *
1345 * Return value:
1346 * new length of string
1347 **/
1348static int strip_and_pad_whitespace(int i, char *buf)
1349{
1350 while (i && buf[i] == ' ')
1351 i--;
1352 buf[i+1] = ' ';
1353 buf[i+2] = '\0';
1354 return i + 2;
1355}
1356
1357/**
1358 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1359 * @prefix: string to print at start of printk
1360 * @hostrcb: hostrcb pointer
1361 * @vpd: vendor/product id/sn struct
1362 *
1363 * Return value:
1364 * none
1365 **/
1366static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1367 struct ipr_vpd *vpd)
1368{
1369 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1370 int i = 0;
1371
1372 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1373 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1374
1375 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1376 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1377
1378 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1379 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1380
1381 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1382}
1383
1da177e4
LT
1384/**
1385 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1386 * @vpd: vendor/product id/sn struct
1da177e4
LT
1387 *
1388 * Return value:
1389 * none
1390 **/
cfc32139 1391static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1392{
1393 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1394 + IPR_SERIAL_NUM_LEN];
1395
cfc32139
BK
1396 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1397 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1398 IPR_PROD_ID_LEN);
1399 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1400 ipr_err("Vendor/Product ID: %s\n", buffer);
1401
cfc32139 1402 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1403 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1404 ipr_err(" Serial Number: %s\n", buffer);
1405}
1406
8cf093e2
BK
1407/**
1408 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1409 * @prefix: string to print at start of printk
1410 * @hostrcb: hostrcb pointer
1411 * @vpd: vendor/product id/sn/wwn struct
1412 *
1413 * Return value:
1414 * none
1415 **/
1416static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1417 struct ipr_ext_vpd *vpd)
1418{
1419 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1420 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1421 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1422}
1423
ee0f05b8
BK
1424/**
1425 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1426 * @vpd: vendor/product id/sn/wwn struct
1427 *
1428 * Return value:
1429 * none
1430 **/
1431static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1432{
1433 ipr_log_vpd(&vpd->vpd);
1434 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1435 be32_to_cpu(vpd->wwid[1]));
1436}
1437
1438/**
1439 * ipr_log_enhanced_cache_error - Log a cache error.
1440 * @ioa_cfg: ioa config struct
1441 * @hostrcb: hostrcb struct
1442 *
1443 * Return value:
1444 * none
1445 **/
1446static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1447 struct ipr_hostrcb *hostrcb)
1448{
4565e370
WB
1449 struct ipr_hostrcb_type_12_error *error;
1450
1451 if (ioa_cfg->sis64)
1452 error = &hostrcb->hcam.u.error64.u.type_12_error;
1453 else
1454 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8
BK
1455
1456 ipr_err("-----Current Configuration-----\n");
1457 ipr_err("Cache Directory Card Information:\n");
1458 ipr_log_ext_vpd(&error->ioa_vpd);
1459 ipr_err("Adapter Card Information:\n");
1460 ipr_log_ext_vpd(&error->cfc_vpd);
1461
1462 ipr_err("-----Expected Configuration-----\n");
1463 ipr_err("Cache Directory Card Information:\n");
1464 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1465 ipr_err("Adapter Card Information:\n");
1466 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1467
1468 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1469 be32_to_cpu(error->ioa_data[0]),
1470 be32_to_cpu(error->ioa_data[1]),
1471 be32_to_cpu(error->ioa_data[2]));
1472}
1473
1da177e4
LT
1474/**
1475 * ipr_log_cache_error - Log a cache error.
1476 * @ioa_cfg: ioa config struct
1477 * @hostrcb: hostrcb struct
1478 *
1479 * Return value:
1480 * none
1481 **/
1482static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1483 struct ipr_hostrcb *hostrcb)
1484{
1485 struct ipr_hostrcb_type_02_error *error =
1486 &hostrcb->hcam.u.error.u.type_02_error;
1487
1488 ipr_err("-----Current Configuration-----\n");
1489 ipr_err("Cache Directory Card Information:\n");
cfc32139 1490 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1491 ipr_err("Adapter Card Information:\n");
cfc32139 1492 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1493
1494 ipr_err("-----Expected Configuration-----\n");
1495 ipr_err("Cache Directory Card Information:\n");
cfc32139 1496 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1497 ipr_err("Adapter Card Information:\n");
cfc32139 1498 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1499
1500 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1501 be32_to_cpu(error->ioa_data[0]),
1502 be32_to_cpu(error->ioa_data[1]),
1503 be32_to_cpu(error->ioa_data[2]));
1504}
1505
ee0f05b8
BK
1506/**
1507 * ipr_log_enhanced_config_error - Log a configuration error.
1508 * @ioa_cfg: ioa config struct
1509 * @hostrcb: hostrcb struct
1510 *
1511 * Return value:
1512 * none
1513 **/
1514static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1515 struct ipr_hostrcb *hostrcb)
1516{
1517 int errors_logged, i;
1518 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1519 struct ipr_hostrcb_type_13_error *error;
1520
1521 error = &hostrcb->hcam.u.error.u.type_13_error;
1522 errors_logged = be32_to_cpu(error->errors_logged);
1523
1524 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1525 be32_to_cpu(error->errors_detected), errors_logged);
1526
1527 dev_entry = error->dev;
1528
1529 for (i = 0; i < errors_logged; i++, dev_entry++) {
1530 ipr_err_separator;
1531
1532 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1533 ipr_log_ext_vpd(&dev_entry->vpd);
1534
1535 ipr_err("-----New Device Information-----\n");
1536 ipr_log_ext_vpd(&dev_entry->new_vpd);
1537
1538 ipr_err("Cache Directory Card Information:\n");
1539 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1540
1541 ipr_err("Adapter Card Information:\n");
1542 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1543 }
1544}
1545
4565e370
WB
1546/**
1547 * ipr_log_sis64_config_error - Log a device error.
1548 * @ioa_cfg: ioa config struct
1549 * @hostrcb: hostrcb struct
1550 *
1551 * Return value:
1552 * none
1553 **/
1554static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1555 struct ipr_hostrcb *hostrcb)
1556{
1557 int errors_logged, i;
1558 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1559 struct ipr_hostrcb_type_23_error *error;
1560 char buffer[IPR_MAX_RES_PATH_LENGTH];
1561
1562 error = &hostrcb->hcam.u.error64.u.type_23_error;
1563 errors_logged = be32_to_cpu(error->errors_logged);
1564
1565 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1566 be32_to_cpu(error->errors_detected), errors_logged);
1567
1568 dev_entry = error->dev;
1569
1570 for (i = 0; i < errors_logged; i++, dev_entry++) {
1571 ipr_err_separator;
1572
1573 ipr_err("Device %d : %s", i + 1,
1574 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1575 ipr_log_ext_vpd(&dev_entry->vpd);
1576
1577 ipr_err("-----New Device Information-----\n");
1578 ipr_log_ext_vpd(&dev_entry->new_vpd);
1579
1580 ipr_err("Cache Directory Card Information:\n");
1581 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1582
1583 ipr_err("Adapter Card Information:\n");
1584 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1585 }
1586}
1587
1da177e4
LT
1588/**
1589 * ipr_log_config_error - Log a configuration error.
1590 * @ioa_cfg: ioa config struct
1591 * @hostrcb: hostrcb struct
1592 *
1593 * Return value:
1594 * none
1595 **/
1596static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1597 struct ipr_hostrcb *hostrcb)
1598{
1599 int errors_logged, i;
1600 struct ipr_hostrcb_device_data_entry *dev_entry;
1601 struct ipr_hostrcb_type_03_error *error;
1602
1603 error = &hostrcb->hcam.u.error.u.type_03_error;
1604 errors_logged = be32_to_cpu(error->errors_logged);
1605
1606 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1607 be32_to_cpu(error->errors_detected), errors_logged);
1608
cfc32139 1609 dev_entry = error->dev;
1da177e4
LT
1610
1611 for (i = 0; i < errors_logged; i++, dev_entry++) {
1612 ipr_err_separator;
1613
fa15b1f6 1614 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1615 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1616
1617 ipr_err("-----New Device Information-----\n");
cfc32139 1618 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1619
1620 ipr_err("Cache Directory Card Information:\n");
cfc32139 1621 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1622
1623 ipr_err("Adapter Card Information:\n");
cfc32139 1624 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1625
1626 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1627 be32_to_cpu(dev_entry->ioa_data[0]),
1628 be32_to_cpu(dev_entry->ioa_data[1]),
1629 be32_to_cpu(dev_entry->ioa_data[2]),
1630 be32_to_cpu(dev_entry->ioa_data[3]),
1631 be32_to_cpu(dev_entry->ioa_data[4]));
1632 }
1633}
1634
ee0f05b8
BK
1635/**
1636 * ipr_log_enhanced_array_error - Log an array configuration error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1639 *
1640 * Return value:
1641 * none
1642 **/
1643static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1644 struct ipr_hostrcb *hostrcb)
1645{
1646 int i, num_entries;
1647 struct ipr_hostrcb_type_14_error *error;
1648 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1649 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1650
1651 error = &hostrcb->hcam.u.error.u.type_14_error;
1652
1653 ipr_err_separator;
1654
1655 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1656 error->protection_level,
1657 ioa_cfg->host->host_no,
1658 error->last_func_vset_res_addr.bus,
1659 error->last_func_vset_res_addr.target,
1660 error->last_func_vset_res_addr.lun);
1661
1662 ipr_err_separator;
1663
1664 array_entry = error->array_member;
1665 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1666 sizeof(error->array_member));
1667
1668 for (i = 0; i < num_entries; i++, array_entry++) {
1669 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1670 continue;
1671
1672 if (be32_to_cpu(error->exposed_mode_adn) == i)
1673 ipr_err("Exposed Array Member %d:\n", i);
1674 else
1675 ipr_err("Array Member %d:\n", i);
1676
1677 ipr_log_ext_vpd(&array_entry->vpd);
1678 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1679 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1680 "Expected Location");
1681
1682 ipr_err_separator;
1683 }
1684}
1685
1da177e4
LT
1686/**
1687 * ipr_log_array_error - Log an array configuration error.
1688 * @ioa_cfg: ioa config struct
1689 * @hostrcb: hostrcb struct
1690 *
1691 * Return value:
1692 * none
1693 **/
1694static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1695 struct ipr_hostrcb *hostrcb)
1696{
1697 int i;
1698 struct ipr_hostrcb_type_04_error *error;
1699 struct ipr_hostrcb_array_data_entry *array_entry;
1700 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1701
1702 error = &hostrcb->hcam.u.error.u.type_04_error;
1703
1704 ipr_err_separator;
1705
1706 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1707 error->protection_level,
1708 ioa_cfg->host->host_no,
1709 error->last_func_vset_res_addr.bus,
1710 error->last_func_vset_res_addr.target,
1711 error->last_func_vset_res_addr.lun);
1712
1713 ipr_err_separator;
1714
1715 array_entry = error->array_member;
1716
1717 for (i = 0; i < 18; i++) {
cfc32139 1718 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1719 continue;
1720
fa15b1f6 1721 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1722 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1723 else
1da177e4 1724 ipr_err("Array Member %d:\n", i);
1da177e4 1725
cfc32139 1726 ipr_log_vpd(&array_entry->vpd);
1da177e4 1727
fa15b1f6
BK
1728 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1729 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1730 "Expected Location");
1da177e4
LT
1731
1732 ipr_err_separator;
1733
1734 if (i == 9)
1735 array_entry = error->array_member2;
1736 else
1737 array_entry++;
1738 }
1739}
1740
1741/**
b0df54bb 1742 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1743 * @ioa_cfg: ioa config struct
b0df54bb
BK
1744 * @data: IOA error data
1745 * @len: data length
1da177e4
LT
1746 *
1747 * Return value:
1748 * none
1749 **/
ac719aba 1750static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1da177e4
LT
1751{
1752 int i;
1da177e4 1753
b0df54bb 1754 if (len == 0)
1da177e4
LT
1755 return;
1756
ac719aba
BK
1757 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1758 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1759
b0df54bb 1760 for (i = 0; i < len / 4; i += 4) {
1da177e4 1761 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb
BK
1762 be32_to_cpu(data[i]),
1763 be32_to_cpu(data[i+1]),
1764 be32_to_cpu(data[i+2]),
1765 be32_to_cpu(data[i+3]));
1da177e4
LT
1766 }
1767}
1768
ee0f05b8
BK
1769/**
1770 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1771 * @ioa_cfg: ioa config struct
1772 * @hostrcb: hostrcb struct
1773 *
1774 * Return value:
1775 * none
1776 **/
1777static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1778 struct ipr_hostrcb *hostrcb)
1779{
1780 struct ipr_hostrcb_type_17_error *error;
1781
4565e370
WB
1782 if (ioa_cfg->sis64)
1783 error = &hostrcb->hcam.u.error64.u.type_17_error;
1784 else
1785 error = &hostrcb->hcam.u.error.u.type_17_error;
1786
ee0f05b8 1787 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1788 strim(error->failure_reason);
ee0f05b8 1789
8cf093e2
BK
1790 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1791 be32_to_cpu(hostrcb->hcam.u.error.prc));
1792 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1793 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8
BK
1794 be32_to_cpu(hostrcb->hcam.length) -
1795 (offsetof(struct ipr_hostrcb_error, u) +
1796 offsetof(struct ipr_hostrcb_type_17_error, data)));
1797}
1798
b0df54bb
BK
1799/**
1800 * ipr_log_dual_ioa_error - Log a dual adapter error.
1801 * @ioa_cfg: ioa config struct
1802 * @hostrcb: hostrcb struct
1803 *
1804 * Return value:
1805 * none
1806 **/
1807static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1808 struct ipr_hostrcb *hostrcb)
1809{
1810 struct ipr_hostrcb_type_07_error *error;
1811
1812 error = &hostrcb->hcam.u.error.u.type_07_error;
1813 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1814 strim(error->failure_reason);
b0df54bb 1815
8cf093e2
BK
1816 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1817 be32_to_cpu(hostrcb->hcam.u.error.prc));
1818 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1819 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb
BK
1820 be32_to_cpu(hostrcb->hcam.length) -
1821 (offsetof(struct ipr_hostrcb_error, u) +
1822 offsetof(struct ipr_hostrcb_type_07_error, data)));
1823}
1824
49dc6a18
BK
1825static const struct {
1826 u8 active;
1827 char *desc;
1828} path_active_desc[] = {
1829 { IPR_PATH_NO_INFO, "Path" },
1830 { IPR_PATH_ACTIVE, "Active path" },
1831 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1832};
1833
1834static const struct {
1835 u8 state;
1836 char *desc;
1837} path_state_desc[] = {
1838 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1839 { IPR_PATH_HEALTHY, "is healthy" },
1840 { IPR_PATH_DEGRADED, "is degraded" },
1841 { IPR_PATH_FAILED, "is failed" }
1842};
1843
1844/**
1845 * ipr_log_fabric_path - Log a fabric path error
1846 * @hostrcb: hostrcb struct
1847 * @fabric: fabric descriptor
1848 *
1849 * Return value:
1850 * none
1851 **/
1852static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1853 struct ipr_hostrcb_fabric_desc *fabric)
1854{
1855 int i, j;
1856 u8 path_state = fabric->path_state;
1857 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1858 u8 state = path_state & IPR_PATH_STATE_MASK;
1859
1860 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1861 if (path_active_desc[i].active != active)
1862 continue;
1863
1864 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1865 if (path_state_desc[j].state != state)
1866 continue;
1867
1868 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1869 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1870 path_active_desc[i].desc, path_state_desc[j].desc,
1871 fabric->ioa_port);
1872 } else if (fabric->cascaded_expander == 0xff) {
1873 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1874 path_active_desc[i].desc, path_state_desc[j].desc,
1875 fabric->ioa_port, fabric->phy);
1876 } else if (fabric->phy == 0xff) {
1877 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1878 path_active_desc[i].desc, path_state_desc[j].desc,
1879 fabric->ioa_port, fabric->cascaded_expander);
1880 } else {
1881 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1882 path_active_desc[i].desc, path_state_desc[j].desc,
1883 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1884 }
1885 return;
1886 }
1887 }
1888
1889 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1890 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1891}
1892
4565e370
WB
1893/**
1894 * ipr_log64_fabric_path - Log a fabric path error
1895 * @hostrcb: hostrcb struct
1896 * @fabric: fabric descriptor
1897 *
1898 * Return value:
1899 * none
1900 **/
1901static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1902 struct ipr_hostrcb64_fabric_desc *fabric)
1903{
1904 int i, j;
1905 u8 path_state = fabric->path_state;
1906 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1907 u8 state = path_state & IPR_PATH_STATE_MASK;
1908 char buffer[IPR_MAX_RES_PATH_LENGTH];
1909
1910 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1911 if (path_active_desc[i].active != active)
1912 continue;
1913
1914 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1915 if (path_state_desc[j].state != state)
1916 continue;
1917
1918 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1919 path_active_desc[i].desc, path_state_desc[j].desc,
1920 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1921 return;
1922 }
1923 }
1924
1925 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1926 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1927}
1928
49dc6a18
BK
1929static const struct {
1930 u8 type;
1931 char *desc;
1932} path_type_desc[] = {
1933 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1934 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1935 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1936 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1937};
1938
1939static const struct {
1940 u8 status;
1941 char *desc;
1942} path_status_desc[] = {
1943 { IPR_PATH_CFG_NO_PROB, "Functional" },
1944 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1945 { IPR_PATH_CFG_FAILED, "Failed" },
1946 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1947 { IPR_PATH_NOT_DETECTED, "Missing" },
1948 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1949};
1950
1951static const char *link_rate[] = {
1952 "unknown",
1953 "disabled",
1954 "phy reset problem",
1955 "spinup hold",
1956 "port selector",
1957 "unknown",
1958 "unknown",
1959 "unknown",
1960 "1.5Gbps",
1961 "3.0Gbps",
1962 "unknown",
1963 "unknown",
1964 "unknown",
1965 "unknown",
1966 "unknown",
1967 "unknown"
1968};
1969
1970/**
1971 * ipr_log_path_elem - Log a fabric path element.
1972 * @hostrcb: hostrcb struct
1973 * @cfg: fabric path element struct
1974 *
1975 * Return value:
1976 * none
1977 **/
1978static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1979 struct ipr_hostrcb_config_element *cfg)
1980{
1981 int i, j;
1982 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1983 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1984
1985 if (type == IPR_PATH_CFG_NOT_EXIST)
1986 return;
1987
1988 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1989 if (path_type_desc[i].type != type)
1990 continue;
1991
1992 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1993 if (path_status_desc[j].status != status)
1994 continue;
1995
1996 if (type == IPR_PATH_CFG_IOA_PORT) {
1997 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1998 path_status_desc[j].desc, path_type_desc[i].desc,
1999 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2000 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2001 } else {
2002 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2003 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2004 path_status_desc[j].desc, path_type_desc[i].desc,
2005 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2006 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2007 } else if (cfg->cascaded_expander == 0xff) {
2008 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2009 "WWN=%08X%08X\n", path_status_desc[j].desc,
2010 path_type_desc[i].desc, cfg->phy,
2011 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2012 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2013 } else if (cfg->phy == 0xff) {
2014 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2015 "WWN=%08X%08X\n", path_status_desc[j].desc,
2016 path_type_desc[i].desc, cfg->cascaded_expander,
2017 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2018 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2019 } else {
2020 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2021 "WWN=%08X%08X\n", path_status_desc[j].desc,
2022 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2023 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2024 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2025 }
2026 }
2027 return;
2028 }
2029 }
2030
2031 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2032 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2033 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2034 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2035}
2036
4565e370
WB
2037/**
2038 * ipr_log64_path_elem - Log a fabric path element.
2039 * @hostrcb: hostrcb struct
2040 * @cfg: fabric path element struct
2041 *
2042 * Return value:
2043 * none
2044 **/
2045static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2046 struct ipr_hostrcb64_config_element *cfg)
2047{
2048 int i, j;
2049 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2050 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2051 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2052 char buffer[IPR_MAX_RES_PATH_LENGTH];
2053
2054 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2055 return;
2056
2057 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2058 if (path_type_desc[i].type != type)
2059 continue;
2060
2061 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2062 if (path_status_desc[j].status != status)
2063 continue;
2064
2065 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2066 path_status_desc[j].desc, path_type_desc[i].desc,
2067 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2068 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2069 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2070 return;
2071 }
2072 }
2073 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2074 "WWN=%08X%08X\n", cfg->type_status,
2075 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2076 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2077 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2078}
2079
49dc6a18
BK
2080/**
2081 * ipr_log_fabric_error - Log a fabric error.
2082 * @ioa_cfg: ioa config struct
2083 * @hostrcb: hostrcb struct
2084 *
2085 * Return value:
2086 * none
2087 **/
2088static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2089 struct ipr_hostrcb *hostrcb)
2090{
2091 struct ipr_hostrcb_type_20_error *error;
2092 struct ipr_hostrcb_fabric_desc *fabric;
2093 struct ipr_hostrcb_config_element *cfg;
2094 int i, add_len;
2095
2096 error = &hostrcb->hcam.u.error.u.type_20_error;
2097 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2098 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2099
2100 add_len = be32_to_cpu(hostrcb->hcam.length) -
2101 (offsetof(struct ipr_hostrcb_error, u) +
2102 offsetof(struct ipr_hostrcb_type_20_error, desc));
2103
2104 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2105 ipr_log_fabric_path(hostrcb, fabric);
2106 for_each_fabric_cfg(fabric, cfg)
2107 ipr_log_path_elem(hostrcb, cfg);
2108
2109 add_len -= be16_to_cpu(fabric->length);
2110 fabric = (struct ipr_hostrcb_fabric_desc *)
2111 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2112 }
2113
ac719aba 2114 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
2115}
2116
4565e370
WB
2117/**
2118 * ipr_log_sis64_array_error - Log a sis64 array error.
2119 * @ioa_cfg: ioa config struct
2120 * @hostrcb: hostrcb struct
2121 *
2122 * Return value:
2123 * none
2124 **/
2125static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2126 struct ipr_hostrcb *hostrcb)
2127{
2128 int i, num_entries;
2129 struct ipr_hostrcb_type_24_error *error;
2130 struct ipr_hostrcb64_array_data_entry *array_entry;
2131 char buffer[IPR_MAX_RES_PATH_LENGTH];
2132 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2133
2134 error = &hostrcb->hcam.u.error64.u.type_24_error;
2135
2136 ipr_err_separator;
2137
2138 ipr_err("RAID %s Array Configuration: %s\n",
2139 error->protection_level,
2140 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2141
2142 ipr_err_separator;
2143
2144 array_entry = error->array_member;
2145 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2146 sizeof(error->array_member));
2147
2148 for (i = 0; i < num_entries; i++, array_entry++) {
2149
2150 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2151 continue;
2152
2153 if (error->exposed_mode_adn == i)
2154 ipr_err("Exposed Array Member %d:\n", i);
2155 else
2156 ipr_err("Array Member %d:\n", i);
2157
2158 ipr_err("Array Member %d:\n", i);
2159 ipr_log_ext_vpd(&array_entry->vpd);
2160 ipr_err("Current Location: %s",
2161 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2162 ipr_err("Expected Location: %s",
2163 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2164
2165 ipr_err_separator;
2166 }
2167}
2168
2169/**
2170 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2171 * @ioa_cfg: ioa config struct
2172 * @hostrcb: hostrcb struct
2173 *
2174 * Return value:
2175 * none
2176 **/
2177static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2178 struct ipr_hostrcb *hostrcb)
2179{
2180 struct ipr_hostrcb_type_30_error *error;
2181 struct ipr_hostrcb64_fabric_desc *fabric;
2182 struct ipr_hostrcb64_config_element *cfg;
2183 int i, add_len;
2184
2185 error = &hostrcb->hcam.u.error64.u.type_30_error;
2186
2187 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2188 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2189
2190 add_len = be32_to_cpu(hostrcb->hcam.length) -
2191 (offsetof(struct ipr_hostrcb64_error, u) +
2192 offsetof(struct ipr_hostrcb_type_30_error, desc));
2193
2194 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2195 ipr_log64_fabric_path(hostrcb, fabric);
2196 for_each_fabric_cfg(fabric, cfg)
2197 ipr_log64_path_elem(hostrcb, cfg);
2198
2199 add_len -= be16_to_cpu(fabric->length);
2200 fabric = (struct ipr_hostrcb64_fabric_desc *)
2201 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2202 }
2203
2204 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2205}
2206
b0df54bb
BK
2207/**
2208 * ipr_log_generic_error - Log an adapter error.
2209 * @ioa_cfg: ioa config struct
2210 * @hostrcb: hostrcb struct
2211 *
2212 * Return value:
2213 * none
2214 **/
2215static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2216 struct ipr_hostrcb *hostrcb)
2217{
ac719aba 2218 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb
BK
2219 be32_to_cpu(hostrcb->hcam.length));
2220}
2221
1da177e4
LT
2222/**
2223 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2224 * @ioasc: IOASC
2225 *
2226 * This function will return the index of into the ipr_error_table
2227 * for the specified IOASC. If the IOASC is not in the table,
2228 * 0 will be returned, which points to the entry used for unknown errors.
2229 *
2230 * Return value:
2231 * index into the ipr_error_table
2232 **/
2233static u32 ipr_get_error(u32 ioasc)
2234{
2235 int i;
2236
2237 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2238 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2239 return i;
2240
2241 return 0;
2242}
2243
2244/**
2245 * ipr_handle_log_data - Log an adapter error.
2246 * @ioa_cfg: ioa config struct
2247 * @hostrcb: hostrcb struct
2248 *
2249 * This function logs an adapter error to the system.
2250 *
2251 * Return value:
2252 * none
2253 **/
2254static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2255 struct ipr_hostrcb *hostrcb)
2256{
2257 u32 ioasc;
2258 int error_index;
2259
2260 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2261 return;
2262
2263 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2264 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2265
4565e370
WB
2266 if (ioa_cfg->sis64)
2267 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2268 else
2269 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2270
4565e370
WB
2271 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2272 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2273 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2274 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2275 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2276 }
2277
2278 error_index = ipr_get_error(ioasc);
2279
2280 if (!ipr_error_table[error_index].log_hcam)
2281 return;
2282
49dc6a18 2283 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2284
2285 /* Set indication we have logged an error */
2286 ioa_cfg->errors_logged++;
2287
933916f3 2288 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2289 return;
cf852037
BK
2290 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2291 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2292
2293 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2294 case IPR_HOST_RCB_OVERLAY_ID_2:
2295 ipr_log_cache_error(ioa_cfg, hostrcb);
2296 break;
2297 case IPR_HOST_RCB_OVERLAY_ID_3:
2298 ipr_log_config_error(ioa_cfg, hostrcb);
2299 break;
2300 case IPR_HOST_RCB_OVERLAY_ID_4:
2301 case IPR_HOST_RCB_OVERLAY_ID_6:
2302 ipr_log_array_error(ioa_cfg, hostrcb);
2303 break;
b0df54bb
BK
2304 case IPR_HOST_RCB_OVERLAY_ID_7:
2305 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2306 break;
ee0f05b8
BK
2307 case IPR_HOST_RCB_OVERLAY_ID_12:
2308 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2309 break;
2310 case IPR_HOST_RCB_OVERLAY_ID_13:
2311 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2312 break;
2313 case IPR_HOST_RCB_OVERLAY_ID_14:
2314 case IPR_HOST_RCB_OVERLAY_ID_16:
2315 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2316 break;
2317 case IPR_HOST_RCB_OVERLAY_ID_17:
2318 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2319 break;
49dc6a18
BK
2320 case IPR_HOST_RCB_OVERLAY_ID_20:
2321 ipr_log_fabric_error(ioa_cfg, hostrcb);
2322 break;
4565e370
WB
2323 case IPR_HOST_RCB_OVERLAY_ID_23:
2324 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2325 break;
2326 case IPR_HOST_RCB_OVERLAY_ID_24:
2327 case IPR_HOST_RCB_OVERLAY_ID_26:
2328 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2329 break;
2330 case IPR_HOST_RCB_OVERLAY_ID_30:
2331 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2332 break;
cf852037 2333 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2334 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2335 default:
a9cfca96 2336 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2337 break;
2338 }
2339}
2340
2341/**
2342 * ipr_process_error - Op done function for an adapter error log.
2343 * @ipr_cmd: ipr command struct
2344 *
2345 * This function is the op done function for an error log host
2346 * controlled async from the adapter. It will log the error and
2347 * send the HCAM back to the adapter.
2348 *
2349 * Return value:
2350 * none
2351 **/
2352static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2353{
2354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2355 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2356 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4565e370
WB
2357 u32 fd_ioasc;
2358
2359 if (ioa_cfg->sis64)
2360 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2361 else
2362 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4
LT
2363
2364 list_del(&hostrcb->queue);
2365 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2366
2367 if (!ioasc) {
2368 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2369 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2370 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1da177e4
LT
2371 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2372 dev_err(&ioa_cfg->pdev->dev,
2373 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2374 }
2375
2376 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2377}
2378
2379/**
2380 * ipr_timeout - An internally generated op has timed out.
2381 * @ipr_cmd: ipr command struct
2382 *
2383 * This function blocks host requests and initiates an
2384 * adapter reset.
2385 *
2386 * Return value:
2387 * none
2388 **/
2389static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2390{
2391 unsigned long lock_flags = 0;
2392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2393
2394 ENTER;
2395 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2396
2397 ioa_cfg->errors_logged++;
2398 dev_err(&ioa_cfg->pdev->dev,
2399 "Adapter being reset due to command timeout.\n");
2400
2401 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2402 ioa_cfg->sdt_state = GET_DUMP;
2403
2404 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2405 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2406
2407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2408 LEAVE;
2409}
2410
2411/**
2412 * ipr_oper_timeout - Adapter timed out transitioning to operational
2413 * @ipr_cmd: ipr command struct
2414 *
2415 * This function blocks host requests and initiates an
2416 * adapter reset.
2417 *
2418 * Return value:
2419 * none
2420 **/
2421static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2422{
2423 unsigned long lock_flags = 0;
2424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2425
2426 ENTER;
2427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2428
2429 ioa_cfg->errors_logged++;
2430 dev_err(&ioa_cfg->pdev->dev,
2431 "Adapter timed out transitioning to operational.\n");
2432
2433 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2434 ioa_cfg->sdt_state = GET_DUMP;
2435
2436 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2437 if (ipr_fastfail)
2438 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2439 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2440 }
2441
2442 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2443 LEAVE;
2444}
2445
2446/**
2447 * ipr_reset_reload - Reset/Reload the IOA
2448 * @ioa_cfg: ioa config struct
2449 * @shutdown_type: shutdown type
2450 *
2451 * This function resets the adapter and re-initializes it.
2452 * This function assumes that all new host commands have been stopped.
2453 * Return value:
2454 * SUCCESS / FAILED
2455 **/
2456static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2457 enum ipr_shutdown_type shutdown_type)
2458{
2459 if (!ioa_cfg->in_reset_reload)
2460 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2461
2462 spin_unlock_irq(ioa_cfg->host->host_lock);
2463 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2464 spin_lock_irq(ioa_cfg->host->host_lock);
2465
2466 /* If we got hit with a host reset while we were already resetting
2467 the adapter for some reason, and the reset failed. */
2468 if (ioa_cfg->ioa_is_dead) {
2469 ipr_trace;
2470 return FAILED;
2471 }
2472
2473 return SUCCESS;
2474}
2475
2476/**
2477 * ipr_find_ses_entry - Find matching SES in SES table
2478 * @res: resource entry struct of SES
2479 *
2480 * Return value:
2481 * pointer to SES table entry / NULL on failure
2482 **/
2483static const struct ipr_ses_table_entry *
2484ipr_find_ses_entry(struct ipr_resource_entry *res)
2485{
2486 int i, j, matches;
3e7ebdfa 2487 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2488 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2489
2490 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2491 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2492 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2493 vpids = &res->std_inq_data.vpids;
2494 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2495 matches++;
2496 else
2497 break;
2498 } else
2499 matches++;
2500 }
2501
2502 if (matches == IPR_PROD_ID_LEN)
2503 return ste;
2504 }
2505
2506 return NULL;
2507}
2508
2509/**
2510 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2511 * @ioa_cfg: ioa config struct
2512 * @bus: SCSI bus
2513 * @bus_width: bus width
2514 *
2515 * Return value:
2516 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2517 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2518 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2519 * max 160MHz = max 320MB/sec).
2520 **/
2521static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2522{
2523 struct ipr_resource_entry *res;
2524 const struct ipr_ses_table_entry *ste;
2525 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2526
2527 /* Loop through each config table entry in the config table buffer */
2528 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2529 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2530 continue;
2531
3e7ebdfa 2532 if (bus != res->bus)
1da177e4
LT
2533 continue;
2534
2535 if (!(ste = ipr_find_ses_entry(res)))
2536 continue;
2537
2538 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2539 }
2540
2541 return max_xfer_rate;
2542}
2543
2544/**
2545 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2546 * @ioa_cfg: ioa config struct
2547 * @max_delay: max delay in micro-seconds to wait
2548 *
2549 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2550 *
2551 * Return value:
2552 * 0 on success / other on failure
2553 **/
2554static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2555{
2556 volatile u32 pcii_reg;
2557 int delay = 1;
2558
2559 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2560 while (delay < max_delay) {
2561 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2562
2563 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2564 return 0;
2565
2566 /* udelay cannot be used if delay is more than a few milliseconds */
2567 if ((delay / 1000) > MAX_UDELAY_MS)
2568 mdelay(delay / 1000);
2569 else
2570 udelay(delay);
2571
2572 delay += delay;
2573 }
2574 return -EIO;
2575}
2576
dcbad00e
WB
2577/**
2578 * ipr_get_sis64_dump_data_section - Dump IOA memory
2579 * @ioa_cfg: ioa config struct
2580 * @start_addr: adapter address to dump
2581 * @dest: destination kernel buffer
2582 * @length_in_words: length to dump in 4 byte words
2583 *
2584 * Return value:
2585 * 0 on success
2586 **/
2587static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2588 u32 start_addr,
2589 __be32 *dest, u32 length_in_words)
2590{
2591 int i;
2592
2593 for (i = 0; i < length_in_words; i++) {
2594 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2595 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2596 dest++;
2597 }
2598
2599 return 0;
2600}
2601
1da177e4
LT
2602/**
2603 * ipr_get_ldump_data_section - Dump IOA memory
2604 * @ioa_cfg: ioa config struct
2605 * @start_addr: adapter address to dump
2606 * @dest: destination kernel buffer
2607 * @length_in_words: length to dump in 4 byte words
2608 *
2609 * Return value:
2610 * 0 on success / -EIO on failure
2611 **/
2612static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2613 u32 start_addr,
2614 __be32 *dest, u32 length_in_words)
2615{
2616 volatile u32 temp_pcii_reg;
2617 int i, delay = 0;
2618
dcbad00e
WB
2619 if (ioa_cfg->sis64)
2620 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2621 dest, length_in_words);
2622
1da177e4
LT
2623 /* Write IOA interrupt reg starting LDUMP state */
2624 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2625 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2626
2627 /* Wait for IO debug acknowledge */
2628 if (ipr_wait_iodbg_ack(ioa_cfg,
2629 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2630 dev_err(&ioa_cfg->pdev->dev,
2631 "IOA dump long data transfer timeout\n");
2632 return -EIO;
2633 }
2634
2635 /* Signal LDUMP interlocked - clear IO debug ack */
2636 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2637 ioa_cfg->regs.clr_interrupt_reg);
2638
2639 /* Write Mailbox with starting address */
2640 writel(start_addr, ioa_cfg->ioa_mailbox);
2641
2642 /* Signal address valid - clear IOA Reset alert */
2643 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2644 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2645
2646 for (i = 0; i < length_in_words; i++) {
2647 /* Wait for IO debug acknowledge */
2648 if (ipr_wait_iodbg_ack(ioa_cfg,
2649 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2650 dev_err(&ioa_cfg->pdev->dev,
2651 "IOA dump short data transfer timeout\n");
2652 return -EIO;
2653 }
2654
2655 /* Read data from mailbox and increment destination pointer */
2656 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2657 dest++;
2658
2659 /* For all but the last word of data, signal data received */
2660 if (i < (length_in_words - 1)) {
2661 /* Signal dump data received - Clear IO debug Ack */
2662 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2663 ioa_cfg->regs.clr_interrupt_reg);
2664 }
2665 }
2666
2667 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2668 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2669 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2670
2671 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2672 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2673
2674 /* Signal dump data received - Clear IO debug Ack */
2675 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2676 ioa_cfg->regs.clr_interrupt_reg);
2677
2678 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2679 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2680 temp_pcii_reg =
214777ba 2681 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2682
2683 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2684 return 0;
2685
2686 udelay(10);
2687 delay += 10;
2688 }
2689
2690 return 0;
2691}
2692
2693#ifdef CONFIG_SCSI_IPR_DUMP
2694/**
2695 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2696 * @ioa_cfg: ioa config struct
2697 * @pci_address: adapter address
2698 * @length: length of data to copy
2699 *
2700 * Copy data from PCI adapter to kernel buffer.
2701 * Note: length MUST be a 4 byte multiple
2702 * Return value:
2703 * 0 on success / other on failure
2704 **/
2705static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2706 unsigned long pci_address, u32 length)
2707{
2708 int bytes_copied = 0;
2709 int cur_len, rc, rem_len, rem_page_len;
2710 __be32 *page;
2711 unsigned long lock_flags = 0;
2712 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2713
2714 while (bytes_copied < length &&
2715 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2716 if (ioa_dump->page_offset >= PAGE_SIZE ||
2717 ioa_dump->page_offset == 0) {
2718 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2719
2720 if (!page) {
2721 ipr_trace;
2722 return bytes_copied;
2723 }
2724
2725 ioa_dump->page_offset = 0;
2726 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2727 ioa_dump->next_page_index++;
2728 } else
2729 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2730
2731 rem_len = length - bytes_copied;
2732 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2733 cur_len = min(rem_len, rem_page_len);
2734
2735 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2736 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2737 rc = -EIO;
2738 } else {
2739 rc = ipr_get_ldump_data_section(ioa_cfg,
2740 pci_address + bytes_copied,
2741 &page[ioa_dump->page_offset / 4],
2742 (cur_len / sizeof(u32)));
2743 }
2744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2745
2746 if (!rc) {
2747 ioa_dump->page_offset += cur_len;
2748 bytes_copied += cur_len;
2749 } else {
2750 ipr_trace;
2751 break;
2752 }
2753 schedule();
2754 }
2755
2756 return bytes_copied;
2757}
2758
2759/**
2760 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2761 * @hdr: dump entry header struct
2762 *
2763 * Return value:
2764 * nothing
2765 **/
2766static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2767{
2768 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2769 hdr->num_elems = 1;
2770 hdr->offset = sizeof(*hdr);
2771 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2772}
2773
2774/**
2775 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2776 * @ioa_cfg: ioa config struct
2777 * @driver_dump: driver dump struct
2778 *
2779 * Return value:
2780 * nothing
2781 **/
2782static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2783 struct ipr_driver_dump *driver_dump)
2784{
2785 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2786
2787 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2788 driver_dump->ioa_type_entry.hdr.len =
2789 sizeof(struct ipr_dump_ioa_type_entry) -
2790 sizeof(struct ipr_dump_entry_header);
2791 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2792 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2793 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2794 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2795 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2796 ucode_vpd->minor_release[1];
2797 driver_dump->hdr.num_entries++;
2798}
2799
2800/**
2801 * ipr_dump_version_data - Fill in the driver version in the dump.
2802 * @ioa_cfg: ioa config struct
2803 * @driver_dump: driver dump struct
2804 *
2805 * Return value:
2806 * nothing
2807 **/
2808static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2809 struct ipr_driver_dump *driver_dump)
2810{
2811 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2812 driver_dump->version_entry.hdr.len =
2813 sizeof(struct ipr_dump_version_entry) -
2814 sizeof(struct ipr_dump_entry_header);
2815 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2816 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2817 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2818 driver_dump->hdr.num_entries++;
2819}
2820
2821/**
2822 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2823 * @ioa_cfg: ioa config struct
2824 * @driver_dump: driver dump struct
2825 *
2826 * Return value:
2827 * nothing
2828 **/
2829static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2830 struct ipr_driver_dump *driver_dump)
2831{
2832 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2833 driver_dump->trace_entry.hdr.len =
2834 sizeof(struct ipr_dump_trace_entry) -
2835 sizeof(struct ipr_dump_entry_header);
2836 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2837 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2838 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2839 driver_dump->hdr.num_entries++;
2840}
2841
2842/**
2843 * ipr_dump_location_data - Fill in the IOA location in the dump.
2844 * @ioa_cfg: ioa config struct
2845 * @driver_dump: driver dump struct
2846 *
2847 * Return value:
2848 * nothing
2849 **/
2850static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2851 struct ipr_driver_dump *driver_dump)
2852{
2853 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2854 driver_dump->location_entry.hdr.len =
2855 sizeof(struct ipr_dump_location_entry) -
2856 sizeof(struct ipr_dump_entry_header);
2857 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2858 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 2859 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
2860 driver_dump->hdr.num_entries++;
2861}
2862
2863/**
2864 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2865 * @ioa_cfg: ioa config struct
2866 * @dump: dump struct
2867 *
2868 * Return value:
2869 * nothing
2870 **/
2871static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2872{
2873 unsigned long start_addr, sdt_word;
2874 unsigned long lock_flags = 0;
2875 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2876 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2877 u32 num_entries, start_off, end_off;
2878 u32 bytes_to_copy, bytes_copied, rc;
2879 struct ipr_sdt *sdt;
dcbad00e 2880 int valid = 1;
1da177e4
LT
2881 int i;
2882
2883 ENTER;
2884
2885 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2886
2887 if (ioa_cfg->sdt_state != GET_DUMP) {
2888 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2889 return;
2890 }
2891
2892 start_addr = readl(ioa_cfg->ioa_mailbox);
2893
dcbad00e 2894 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
2895 dev_err(&ioa_cfg->pdev->dev,
2896 "Invalid dump table format: %lx\n", start_addr);
2897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2898 return;
2899 }
2900
2901 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2902
2903 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2904
2905 /* Initialize the overall dump header */
2906 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2907 driver_dump->hdr.num_entries = 1;
2908 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2909 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2910 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2911 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2912
2913 ipr_dump_version_data(ioa_cfg, driver_dump);
2914 ipr_dump_location_data(ioa_cfg, driver_dump);
2915 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2916 ipr_dump_trace_data(ioa_cfg, driver_dump);
2917
2918 /* Update dump_header */
2919 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2920
2921 /* IOA Dump entry */
2922 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
2923 ioa_dump->hdr.len = 0;
2924 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2925 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2926
2927 /* First entries in sdt are actually a list of dump addresses and
2928 lengths to gather the real dump data. sdt represents the pointer
2929 to the ioa generated dump table. Dump data will be extracted based
2930 on entries in this table */
2931 sdt = &ioa_dump->sdt;
2932
2933 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2934 sizeof(struct ipr_sdt) / sizeof(__be32));
2935
2936 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
2937 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2938 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
2939 dev_err(&ioa_cfg->pdev->dev,
2940 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2941 rc, be32_to_cpu(sdt->hdr.state));
2942 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2943 ioa_cfg->sdt_state = DUMP_OBTAINED;
2944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2945 return;
2946 }
2947
2948 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2949
2950 if (num_entries > IPR_NUM_SDT_ENTRIES)
2951 num_entries = IPR_NUM_SDT_ENTRIES;
2952
2953 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2954
2955 for (i = 0; i < num_entries; i++) {
2956 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2957 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2958 break;
2959 }
2960
2961 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
2962 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2963 if (ioa_cfg->sis64)
2964 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2965 else {
2966 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2967 end_off = be32_to_cpu(sdt->entry[i].end_token);
2968
2969 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2970 bytes_to_copy = end_off - start_off;
2971 else
2972 valid = 0;
2973 }
2974 if (valid) {
1da177e4
LT
2975 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2976 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2977 continue;
2978 }
2979
2980 /* Copy data from adapter to driver buffers */
2981 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2982 bytes_to_copy);
2983
2984 ioa_dump->hdr.len += bytes_copied;
2985
2986 if (bytes_copied != bytes_to_copy) {
2987 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2988 break;
2989 }
2990 }
2991 }
2992 }
2993
2994 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2995
2996 /* Update dump_header */
2997 driver_dump->hdr.len += ioa_dump->hdr.len;
2998 wmb();
2999 ioa_cfg->sdt_state = DUMP_OBTAINED;
3000 LEAVE;
3001}
3002
3003#else
3004#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3005#endif
3006
3007/**
3008 * ipr_release_dump - Free adapter dump memory
3009 * @kref: kref struct
3010 *
3011 * Return value:
3012 * nothing
3013 **/
3014static void ipr_release_dump(struct kref *kref)
3015{
3016 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3017 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3018 unsigned long lock_flags = 0;
3019 int i;
3020
3021 ENTER;
3022 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3023 ioa_cfg->dump = NULL;
3024 ioa_cfg->sdt_state = INACTIVE;
3025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3026
3027 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3028 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3029
3030 kfree(dump);
3031 LEAVE;
3032}
3033
3034/**
3035 * ipr_worker_thread - Worker thread
c4028958 3036 * @work: ioa config struct
1da177e4
LT
3037 *
3038 * Called at task level from a work thread. This function takes care
3039 * of adding and removing device from the mid-layer as configuration
3040 * changes are detected by the adapter.
3041 *
3042 * Return value:
3043 * nothing
3044 **/
c4028958 3045static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3046{
3047 unsigned long lock_flags;
3048 struct ipr_resource_entry *res;
3049 struct scsi_device *sdev;
3050 struct ipr_dump *dump;
c4028958
DH
3051 struct ipr_ioa_cfg *ioa_cfg =
3052 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3053 u8 bus, target, lun;
3054 int did_work;
3055
3056 ENTER;
3057 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058
3059 if (ioa_cfg->sdt_state == GET_DUMP) {
3060 dump = ioa_cfg->dump;
3061 if (!dump) {
3062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3063 return;
3064 }
3065 kref_get(&dump->kref);
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067 ipr_get_ioa_dump(ioa_cfg, dump);
3068 kref_put(&dump->kref, ipr_release_dump);
3069
3070 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3071 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3072 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 return;
3075 }
3076
3077restart:
3078 do {
3079 did_work = 0;
3080 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3082 return;
3083 }
3084
3085 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3086 if (res->del_from_ml && res->sdev) {
3087 did_work = 1;
3088 sdev = res->sdev;
3089 if (!scsi_device_get(sdev)) {
1da177e4
LT
3090 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3092 scsi_remove_device(sdev);
3093 scsi_device_put(sdev);
3094 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3095 }
3096 break;
3097 }
3098 }
3099 } while(did_work);
3100
3101 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3102 if (res->add_to_ml) {
3e7ebdfa
WB
3103 bus = res->bus;
3104 target = res->target;
3105 lun = res->lun;
1121b794 3106 res->add_to_ml = 0;
1da177e4
LT
3107 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3108 scsi_add_device(ioa_cfg->host, bus, target, lun);
3109 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3110 goto restart;
3111 }
3112 }
3113
3114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3115 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3116 LEAVE;
3117}
3118
3119#ifdef CONFIG_SCSI_IPR_TRACE
3120/**
3121 * ipr_read_trace - Dump the adapter trace
3122 * @kobj: kobject struct
91a69029 3123 * @bin_attr: bin_attribute struct
1da177e4
LT
3124 * @buf: buffer
3125 * @off: offset
3126 * @count: buffer size
3127 *
3128 * Return value:
3129 * number of bytes printed to buffer
3130 **/
91a69029
ZR
3131static ssize_t ipr_read_trace(struct kobject *kobj,
3132 struct bin_attribute *bin_attr,
3133 char *buf, loff_t off, size_t count)
1da177e4 3134{
ee959b00
TJ
3135 struct device *dev = container_of(kobj, struct device, kobj);
3136 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3137 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3138 unsigned long lock_flags = 0;
d777aaf3 3139 ssize_t ret;
1da177e4
LT
3140
3141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3142 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3143 IPR_TRACE_SIZE);
1da177e4 3144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3145
3146 return ret;
1da177e4
LT
3147}
3148
3149static struct bin_attribute ipr_trace_attr = {
3150 .attr = {
3151 .name = "trace",
3152 .mode = S_IRUGO,
3153 },
3154 .size = 0,
3155 .read = ipr_read_trace,
3156};
3157#endif
3158
3159/**
3160 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3161 * @dev: class device struct
3162 * @buf: buffer
1da177e4
LT
3163 *
3164 * Return value:
3165 * number of bytes printed to buffer
3166 **/
ee959b00
TJ
3167static ssize_t ipr_show_fw_version(struct device *dev,
3168 struct device_attribute *attr, char *buf)
1da177e4 3169{
ee959b00 3170 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3171 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3172 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3173 unsigned long lock_flags = 0;
3174 int len;
3175
3176 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3177 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3178 ucode_vpd->major_release, ucode_vpd->card_type,
3179 ucode_vpd->minor_release[0],
3180 ucode_vpd->minor_release[1]);
3181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3182 return len;
3183}
3184
ee959b00 3185static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3186 .attr = {
3187 .name = "fw_version",
3188 .mode = S_IRUGO,
3189 },
3190 .show = ipr_show_fw_version,
3191};
3192
3193/**
3194 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3195 * @dev: class device struct
3196 * @buf: buffer
1da177e4
LT
3197 *
3198 * Return value:
3199 * number of bytes printed to buffer
3200 **/
ee959b00
TJ
3201static ssize_t ipr_show_log_level(struct device *dev,
3202 struct device_attribute *attr, char *buf)
1da177e4 3203{
ee959b00 3204 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3205 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3206 unsigned long lock_flags = 0;
3207 int len;
3208
3209 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3210 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3212 return len;
3213}
3214
3215/**
3216 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3217 * @dev: class device struct
3218 * @buf: buffer
1da177e4
LT
3219 *
3220 * Return value:
3221 * number of bytes printed to buffer
3222 **/
ee959b00
TJ
3223static ssize_t ipr_store_log_level(struct device *dev,
3224 struct device_attribute *attr,
1da177e4
LT
3225 const char *buf, size_t count)
3226{
ee959b00 3227 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3228 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3229 unsigned long lock_flags = 0;
3230
3231 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3232 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3234 return strlen(buf);
3235}
3236
ee959b00 3237static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3238 .attr = {
3239 .name = "log_level",
3240 .mode = S_IRUGO | S_IWUSR,
3241 },
3242 .show = ipr_show_log_level,
3243 .store = ipr_store_log_level
3244};
3245
3246/**
3247 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3248 * @dev: device struct
3249 * @buf: buffer
3250 * @count: buffer size
1da177e4
LT
3251 *
3252 * This function will reset the adapter and wait a reasonable
3253 * amount of time for any errors that the adapter might log.
3254 *
3255 * Return value:
3256 * count on success / other on failure
3257 **/
ee959b00
TJ
3258static ssize_t ipr_store_diagnostics(struct device *dev,
3259 struct device_attribute *attr,
1da177e4
LT
3260 const char *buf, size_t count)
3261{
ee959b00 3262 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3263 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3264 unsigned long lock_flags = 0;
3265 int rc = count;
3266
3267 if (!capable(CAP_SYS_ADMIN))
3268 return -EACCES;
3269
1da177e4 3270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3271 while(ioa_cfg->in_reset_reload) {
3272 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3273 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3274 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3275 }
3276
1da177e4
LT
3277 ioa_cfg->errors_logged = 0;
3278 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3279
3280 if (ioa_cfg->in_reset_reload) {
3281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3283
3284 /* Wait for a second for any errors to be logged */
3285 msleep(1000);
3286 } else {
3287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3288 return -EIO;
3289 }
3290
3291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3292 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3293 rc = -EIO;
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3295
3296 return rc;
3297}
3298
ee959b00 3299static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3300 .attr = {
3301 .name = "run_diagnostics",
3302 .mode = S_IWUSR,
3303 },
3304 .store = ipr_store_diagnostics
3305};
3306
f37eb54b
BK
3307/**
3308 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
3309 * @class_dev: device struct
3310 * @buf: buffer
f37eb54b
BK
3311 *
3312 * Return value:
3313 * number of bytes printed to buffer
3314 **/
ee959b00
TJ
3315static ssize_t ipr_show_adapter_state(struct device *dev,
3316 struct device_attribute *attr, char *buf)
f37eb54b 3317{
ee959b00 3318 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3319 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3320 unsigned long lock_flags = 0;
3321 int len;
3322
3323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3324 if (ioa_cfg->ioa_is_dead)
3325 len = snprintf(buf, PAGE_SIZE, "offline\n");
3326 else
3327 len = snprintf(buf, PAGE_SIZE, "online\n");
3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 return len;
3330}
3331
3332/**
3333 * ipr_store_adapter_state - Change adapter state
ee959b00
TJ
3334 * @dev: device struct
3335 * @buf: buffer
3336 * @count: buffer size
f37eb54b
BK
3337 *
3338 * This function will change the adapter's state.
3339 *
3340 * Return value:
3341 * count on success / other on failure
3342 **/
ee959b00
TJ
3343static ssize_t ipr_store_adapter_state(struct device *dev,
3344 struct device_attribute *attr,
f37eb54b
BK
3345 const char *buf, size_t count)
3346{
ee959b00 3347 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3348 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3349 unsigned long lock_flags;
3350 int result = count;
3351
3352 if (!capable(CAP_SYS_ADMIN))
3353 return -EACCES;
3354
3355 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3356 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3357 ioa_cfg->ioa_is_dead = 0;
3358 ioa_cfg->reset_retries = 0;
3359 ioa_cfg->in_ioa_bringdown = 0;
3360 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3361 }
3362 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3363 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3364
3365 return result;
3366}
3367
ee959b00 3368static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3369 .attr = {
49dd0961 3370 .name = "online_state",
f37eb54b
BK
3371 .mode = S_IRUGO | S_IWUSR,
3372 },
3373 .show = ipr_show_adapter_state,
3374 .store = ipr_store_adapter_state
3375};
3376
1da177e4
LT
3377/**
3378 * ipr_store_reset_adapter - Reset the adapter
ee959b00
TJ
3379 * @dev: device struct
3380 * @buf: buffer
3381 * @count: buffer size
1da177e4
LT
3382 *
3383 * This function will reset the adapter.
3384 *
3385 * Return value:
3386 * count on success / other on failure
3387 **/
ee959b00
TJ
3388static ssize_t ipr_store_reset_adapter(struct device *dev,
3389 struct device_attribute *attr,
1da177e4
LT
3390 const char *buf, size_t count)
3391{
ee959b00 3392 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3393 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3394 unsigned long lock_flags;
3395 int result = count;
3396
3397 if (!capable(CAP_SYS_ADMIN))
3398 return -EACCES;
3399
3400 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3401 if (!ioa_cfg->in_reset_reload)
3402 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3405
3406 return result;
3407}
3408
ee959b00 3409static struct device_attribute ipr_ioa_reset_attr = {
1da177e4
LT
3410 .attr = {
3411 .name = "reset_host",
3412 .mode = S_IWUSR,
3413 },
3414 .store = ipr_store_reset_adapter
3415};
3416
3417/**
3418 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3419 * @buf_len: buffer length
3420 *
3421 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3422 * list to use for microcode download
3423 *
3424 * Return value:
3425 * pointer to sglist / NULL on failure
3426 **/
3427static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3428{
3429 int sg_size, order, bsize_elem, num_elem, i, j;
3430 struct ipr_sglist *sglist;
3431 struct scatterlist *scatterlist;
3432 struct page *page;
3433
3434 /* Get the minimum size per scatter/gather element */
3435 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3436
3437 /* Get the actual size per element */
3438 order = get_order(sg_size);
3439
3440 /* Determine the actual number of bytes per element */
3441 bsize_elem = PAGE_SIZE * (1 << order);
3442
3443 /* Determine the actual number of sg entries needed */
3444 if (buf_len % bsize_elem)
3445 num_elem = (buf_len / bsize_elem) + 1;
3446 else
3447 num_elem = buf_len / bsize_elem;
3448
3449 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3450 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
3451 (sizeof(struct scatterlist) * (num_elem - 1)),
3452 GFP_KERNEL);
3453
3454 if (sglist == NULL) {
3455 ipr_trace;
3456 return NULL;
3457 }
3458
1da177e4 3459 scatterlist = sglist->scatterlist;
45711f1a 3460 sg_init_table(scatterlist, num_elem);
1da177e4
LT
3461
3462 sglist->order = order;
3463 sglist->num_sg = num_elem;
3464
3465 /* Allocate a bunch of sg elements */
3466 for (i = 0; i < num_elem; i++) {
3467 page = alloc_pages(GFP_KERNEL, order);
3468 if (!page) {
3469 ipr_trace;
3470
3471 /* Free up what we already allocated */
3472 for (j = i - 1; j >= 0; j--)
45711f1a 3473 __free_pages(sg_page(&scatterlist[j]), order);
1da177e4
LT
3474 kfree(sglist);
3475 return NULL;
3476 }
3477
642f1490 3478 sg_set_page(&scatterlist[i], page, 0, 0);
1da177e4
LT
3479 }
3480
3481 return sglist;
3482}
3483
3484/**
3485 * ipr_free_ucode_buffer - Frees a microcode download buffer
3486 * @p_dnld: scatter/gather list pointer
3487 *
3488 * Free a DMA'able ucode download buffer previously allocated with
3489 * ipr_alloc_ucode_buffer
3490 *
3491 * Return value:
3492 * nothing
3493 **/
3494static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3495{
3496 int i;
3497
3498 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3499 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
1da177e4
LT
3500
3501 kfree(sglist);
3502}
3503
3504/**
3505 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3506 * @sglist: scatter/gather list pointer
3507 * @buffer: buffer pointer
3508 * @len: buffer length
3509 *
3510 * Copy a microcode image from a user buffer into a buffer allocated by
3511 * ipr_alloc_ucode_buffer
3512 *
3513 * Return value:
3514 * 0 on success / other on failure
3515 **/
3516static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3517 u8 *buffer, u32 len)
3518{
3519 int bsize_elem, i, result = 0;
3520 struct scatterlist *scatterlist;
3521 void *kaddr;
3522
3523 /* Determine the actual number of bytes per element */
3524 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3525
3526 scatterlist = sglist->scatterlist;
3527
3528 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
45711f1a
JA
3529 struct page *page = sg_page(&scatterlist[i]);
3530
3531 kaddr = kmap(page);
1da177e4 3532 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3533 kunmap(page);
1da177e4
LT
3534
3535 scatterlist[i].length = bsize_elem;
3536
3537 if (result != 0) {
3538 ipr_trace;
3539 return result;
3540 }
3541 }
3542
3543 if (len % bsize_elem) {
45711f1a
JA
3544 struct page *page = sg_page(&scatterlist[i]);
3545
3546 kaddr = kmap(page);
1da177e4 3547 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3548 kunmap(page);
1da177e4
LT
3549
3550 scatterlist[i].length = len % bsize_elem;
3551 }
3552
3553 sglist->buffer_len = len;
3554 return result;
3555}
3556
a32c055f
WB
3557/**
3558 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3559 * @ipr_cmd: ipr command struct
3560 * @sglist: scatter/gather list
3561 *
3562 * Builds a microcode download IOA data list (IOADL).
3563 *
3564 **/
3565static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3566 struct ipr_sglist *sglist)
3567{
3568 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3569 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3570 struct scatterlist *scatterlist = sglist->scatterlist;
3571 int i;
3572
3573 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3575 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3576
3577 ioarcb->ioadl_len =
3578 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3579 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3580 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3581 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3582 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3583 }
3584
3585 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3586}
3587
1da177e4 3588/**
12baa420 3589 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
3590 * @ipr_cmd: ipr command struct
3591 * @sglist: scatter/gather list
1da177e4 3592 *
12baa420 3593 * Builds a microcode download IOA data list (IOADL).
1da177e4 3594 *
1da177e4 3595 **/
12baa420
BK
3596static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3597 struct ipr_sglist *sglist)
1da177e4 3598{
1da177e4 3599 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3600 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4
LT
3601 struct scatterlist *scatterlist = sglist->scatterlist;
3602 int i;
3603
12baa420 3604 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3605 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
3606 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3607
3608 ioarcb->ioadl_len =
1da177e4
LT
3609 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3610
3611 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3612 ioadl[i].flags_and_data_len =
3613 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3614 ioadl[i].address =
3615 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3616 }
3617
12baa420
BK
3618 ioadl[i-1].flags_and_data_len |=
3619 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3620}
3621
3622/**
3623 * ipr_update_ioa_ucode - Update IOA's microcode
3624 * @ioa_cfg: ioa config struct
3625 * @sglist: scatter/gather list
3626 *
3627 * Initiate an adapter reset to update the IOA's microcode
3628 *
3629 * Return value:
3630 * 0 on success / -EIO on failure
3631 **/
3632static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3633 struct ipr_sglist *sglist)
3634{
3635 unsigned long lock_flags;
3636
3637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3638 while(ioa_cfg->in_reset_reload) {
3639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3640 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3641 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3642 }
12baa420
BK
3643
3644 if (ioa_cfg->ucode_sglist) {
3645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3646 dev_err(&ioa_cfg->pdev->dev,
3647 "Microcode download already in progress\n");
3648 return -EIO;
1da177e4 3649 }
12baa420
BK
3650
3651 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3652 sglist->num_sg, DMA_TO_DEVICE);
3653
3654 if (!sglist->num_dma_sg) {
3655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3656 dev_err(&ioa_cfg->pdev->dev,
3657 "Failed to map microcode download buffer!\n");
1da177e4
LT
3658 return -EIO;
3659 }
3660
12baa420
BK
3661 ioa_cfg->ucode_sglist = sglist;
3662 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3664 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3665
3666 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3667 ioa_cfg->ucode_sglist = NULL;
3668 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3669 return 0;
3670}
3671
3672/**
3673 * ipr_store_update_fw - Update the firmware on the adapter
ee959b00
TJ
3674 * @class_dev: device struct
3675 * @buf: buffer
3676 * @count: buffer size
1da177e4
LT
3677 *
3678 * This function will update the firmware on the adapter.
3679 *
3680 * Return value:
3681 * count on success / other on failure
3682 **/
ee959b00
TJ
3683static ssize_t ipr_store_update_fw(struct device *dev,
3684 struct device_attribute *attr,
3685 const char *buf, size_t count)
1da177e4 3686{
ee959b00 3687 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3688 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3689 struct ipr_ucode_image_header *image_hdr;
3690 const struct firmware *fw_entry;
3691 struct ipr_sglist *sglist;
1da177e4
LT
3692 char fname[100];
3693 char *src;
3694 int len, result, dnld_size;
3695
3696 if (!capable(CAP_SYS_ADMIN))
3697 return -EACCES;
3698
3699 len = snprintf(fname, 99, "%s", buf);
3700 fname[len-1] = '\0';
3701
3702 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3703 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3704 return -EIO;
3705 }
3706
3707 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3708
3709 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3710 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3711 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3712 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3713 release_firmware(fw_entry);
3714 return -EINVAL;
3715 }
3716
3717 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3718 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3719 sglist = ipr_alloc_ucode_buffer(dnld_size);
3720
3721 if (!sglist) {
3722 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3723 release_firmware(fw_entry);
3724 return -ENOMEM;
3725 }
3726
3727 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3728
3729 if (result) {
3730 dev_err(&ioa_cfg->pdev->dev,
3731 "Microcode buffer copy to DMA buffer failed\n");
12baa420 3732 goto out;
1da177e4
LT
3733 }
3734
12baa420 3735 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 3736
12baa420
BK
3737 if (!result)
3738 result = count;
3739out:
1da177e4
LT
3740 ipr_free_ucode_buffer(sglist);
3741 release_firmware(fw_entry);
12baa420 3742 return result;
1da177e4
LT
3743}
3744
ee959b00 3745static struct device_attribute ipr_update_fw_attr = {
1da177e4
LT
3746 .attr = {
3747 .name = "update_fw",
3748 .mode = S_IWUSR,
3749 },
3750 .store = ipr_store_update_fw
3751};
3752
ee959b00 3753static struct device_attribute *ipr_ioa_attrs[] = {
1da177e4
LT
3754 &ipr_fw_version_attr,
3755 &ipr_log_level_attr,
3756 &ipr_diagnostics_attr,
f37eb54b 3757 &ipr_ioa_state_attr,
1da177e4
LT
3758 &ipr_ioa_reset_attr,
3759 &ipr_update_fw_attr,
3760 NULL,
3761};
3762
3763#ifdef CONFIG_SCSI_IPR_DUMP
3764/**
3765 * ipr_read_dump - Dump the adapter
3766 * @kobj: kobject struct
91a69029 3767 * @bin_attr: bin_attribute struct
1da177e4
LT
3768 * @buf: buffer
3769 * @off: offset
3770 * @count: buffer size
3771 *
3772 * Return value:
3773 * number of bytes printed to buffer
3774 **/
91a69029
ZR
3775static ssize_t ipr_read_dump(struct kobject *kobj,
3776 struct bin_attribute *bin_attr,
3777 char *buf, loff_t off, size_t count)
1da177e4 3778{
ee959b00 3779 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
3780 struct Scsi_Host *shost = class_to_shost(cdev);
3781 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3782 struct ipr_dump *dump;
3783 unsigned long lock_flags = 0;
3784 char *src;
3785 int len;
3786 size_t rc = count;
3787
3788 if (!capable(CAP_SYS_ADMIN))
3789 return -EACCES;
3790
3791 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3792 dump = ioa_cfg->dump;
3793
3794 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3796 return 0;
3797 }
3798 kref_get(&dump->kref);
3799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3800
3801 if (off > dump->driver_dump.hdr.len) {
3802 kref_put(&dump->kref, ipr_release_dump);
3803 return 0;
3804 }
3805
3806 if (off + count > dump->driver_dump.hdr.len) {
3807 count = dump->driver_dump.hdr.len - off;
3808 rc = count;
3809 }
3810
3811 if (count && off < sizeof(dump->driver_dump)) {
3812 if (off + count > sizeof(dump->driver_dump))
3813 len = sizeof(dump->driver_dump) - off;
3814 else
3815 len = count;
3816 src = (u8 *)&dump->driver_dump + off;
3817 memcpy(buf, src, len);
3818 buf += len;
3819 off += len;
3820 count -= len;
3821 }
3822
3823 off -= sizeof(dump->driver_dump);
3824
3825 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3826 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3827 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3828 else
3829 len = count;
3830 src = (u8 *)&dump->ioa_dump + off;
3831 memcpy(buf, src, len);
3832 buf += len;
3833 off += len;
3834 count -= len;
3835 }
3836
3837 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3838
3839 while (count) {
3840 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3841 len = PAGE_ALIGN(off) - off;
3842 else
3843 len = count;
3844 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3845 src += off & ~PAGE_MASK;
3846 memcpy(buf, src, len);
3847 buf += len;
3848 off += len;
3849 count -= len;
3850 }
3851
3852 kref_put(&dump->kref, ipr_release_dump);
3853 return rc;
3854}
3855
3856/**
3857 * ipr_alloc_dump - Prepare for adapter dump
3858 * @ioa_cfg: ioa config struct
3859 *
3860 * Return value:
3861 * 0 on success / other on failure
3862 **/
3863static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3864{
3865 struct ipr_dump *dump;
3866 unsigned long lock_flags = 0;
3867
0bc42e35 3868 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
3869
3870 if (!dump) {
3871 ipr_err("Dump memory allocation failed\n");
3872 return -ENOMEM;
3873 }
3874
1da177e4
LT
3875 kref_init(&dump->kref);
3876 dump->ioa_cfg = ioa_cfg;
3877
3878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3879
3880 if (INACTIVE != ioa_cfg->sdt_state) {
3881 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3882 kfree(dump);
3883 return 0;
3884 }
3885
3886 ioa_cfg->dump = dump;
3887 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3888 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3889 ioa_cfg->dump_taken = 1;
3890 schedule_work(&ioa_cfg->work_q);
3891 }
3892 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3893
1da177e4
LT
3894 return 0;
3895}
3896
3897/**
3898 * ipr_free_dump - Free adapter dump memory
3899 * @ioa_cfg: ioa config struct
3900 *
3901 * Return value:
3902 * 0 on success / other on failure
3903 **/
3904static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3905{
3906 struct ipr_dump *dump;
3907 unsigned long lock_flags = 0;
3908
3909 ENTER;
3910
3911 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3912 dump = ioa_cfg->dump;
3913 if (!dump) {
3914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3915 return 0;
3916 }
3917
3918 ioa_cfg->dump = NULL;
3919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3920
3921 kref_put(&dump->kref, ipr_release_dump);
3922
3923 LEAVE;
3924 return 0;
3925}
3926
3927/**
3928 * ipr_write_dump - Setup dump state of adapter
3929 * @kobj: kobject struct
91a69029 3930 * @bin_attr: bin_attribute struct
1da177e4
LT
3931 * @buf: buffer
3932 * @off: offset
3933 * @count: buffer size
3934 *
3935 * Return value:
3936 * number of bytes printed to buffer
3937 **/
91a69029
ZR
3938static ssize_t ipr_write_dump(struct kobject *kobj,
3939 struct bin_attribute *bin_attr,
3940 char *buf, loff_t off, size_t count)
1da177e4 3941{
ee959b00 3942 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
3943 struct Scsi_Host *shost = class_to_shost(cdev);
3944 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3945 int rc;
3946
3947 if (!capable(CAP_SYS_ADMIN))
3948 return -EACCES;
3949
3950 if (buf[0] == '1')
3951 rc = ipr_alloc_dump(ioa_cfg);
3952 else if (buf[0] == '0')
3953 rc = ipr_free_dump(ioa_cfg);
3954 else
3955 return -EINVAL;
3956
3957 if (rc)
3958 return rc;
3959 else
3960 return count;
3961}
3962
3963static struct bin_attribute ipr_dump_attr = {
3964 .attr = {
3965 .name = "dump",
3966 .mode = S_IRUSR | S_IWUSR,
3967 },
3968 .size = 0,
3969 .read = ipr_read_dump,
3970 .write = ipr_write_dump
3971};
3972#else
3973static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3974#endif
3975
3976/**
3977 * ipr_change_queue_depth - Change the device's queue depth
3978 * @sdev: scsi device struct
3979 * @qdepth: depth to set
e881a172 3980 * @reason: calling context
1da177e4
LT
3981 *
3982 * Return value:
3983 * actual depth set
3984 **/
e881a172
MC
3985static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3986 int reason)
1da177e4 3987{
35a39691
BK
3988 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3989 struct ipr_resource_entry *res;
3990 unsigned long lock_flags = 0;
3991
e881a172
MC
3992 if (reason != SCSI_QDEPTH_DEFAULT)
3993 return -EOPNOTSUPP;
3994
35a39691
BK
3995 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3996 res = (struct ipr_resource_entry *)sdev->hostdata;
3997
3998 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3999 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4001
1da177e4
LT
4002 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4003 return sdev->queue_depth;
4004}
4005
4006/**
4007 * ipr_change_queue_type - Change the device's queue type
4008 * @dsev: scsi device struct
4009 * @tag_type: type of tags to use
4010 *
4011 * Return value:
4012 * actual queue type set
4013 **/
4014static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4015{
4016 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4017 struct ipr_resource_entry *res;
4018 unsigned long lock_flags = 0;
4019
4020 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4021 res = (struct ipr_resource_entry *)sdev->hostdata;
4022
4023 if (res) {
4024 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4025 /*
4026 * We don't bother quiescing the device here since the
4027 * adapter firmware does it for us.
4028 */
4029 scsi_set_tag_type(sdev, tag_type);
4030
4031 if (tag_type)
4032 scsi_activate_tcq(sdev, sdev->queue_depth);
4033 else
4034 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4035 } else
4036 tag_type = 0;
4037 } else
4038 tag_type = 0;
4039
4040 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4041 return tag_type;
4042}
4043
4044/**
4045 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4046 * @dev: device struct
4047 * @buf: buffer
4048 *
4049 * Return value:
4050 * number of bytes printed to buffer
4051 **/
10523b3b 4052static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
4053{
4054 struct scsi_device *sdev = to_scsi_device(dev);
4055 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4056 struct ipr_resource_entry *res;
4057 unsigned long lock_flags = 0;
4058 ssize_t len = -ENXIO;
4059
4060 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4061 res = (struct ipr_resource_entry *)sdev->hostdata;
4062 if (res)
3e7ebdfa 4063 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4065 return len;
4066}
4067
4068static struct device_attribute ipr_adapter_handle_attr = {
4069 .attr = {
4070 .name = "adapter_handle",
4071 .mode = S_IRUSR,
4072 },
4073 .show = ipr_show_adapter_handle
4074};
4075
3e7ebdfa
WB
4076/**
4077 * ipr_show_resource_path - Show the resource path for this device.
4078 * @dev: device struct
4079 * @buf: buffer
4080 *
4081 * Return value:
4082 * number of bytes printed to buffer
4083 **/
4084static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4085{
4086 struct scsi_device *sdev = to_scsi_device(dev);
4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4088 struct ipr_resource_entry *res;
4089 unsigned long lock_flags = 0;
4090 ssize_t len = -ENXIO;
4091 char buffer[IPR_MAX_RES_PATH_LENGTH];
4092
4093 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4094 res = (struct ipr_resource_entry *)sdev->hostdata;
4095 if (res)
4096 len = snprintf(buf, PAGE_SIZE, "%s\n",
4097 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4098 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4099 return len;
4100}
4101
4102static struct device_attribute ipr_resource_path_attr = {
4103 .attr = {
4104 .name = "resource_path",
4105 .mode = S_IRUSR,
4106 },
4107 .show = ipr_show_resource_path
4108};
4109
1da177e4
LT
4110static struct device_attribute *ipr_dev_attrs[] = {
4111 &ipr_adapter_handle_attr,
3e7ebdfa 4112 &ipr_resource_path_attr,
1da177e4
LT
4113 NULL,
4114};
4115
4116/**
4117 * ipr_biosparam - Return the HSC mapping
4118 * @sdev: scsi device struct
4119 * @block_device: block device pointer
4120 * @capacity: capacity of the device
4121 * @parm: Array containing returned HSC values.
4122 *
4123 * This function generates the HSC parms that fdisk uses.
4124 * We want to make sure we return something that places partitions
4125 * on 4k boundaries for best performance with the IOA.
4126 *
4127 * Return value:
4128 * 0 on success
4129 **/
4130static int ipr_biosparam(struct scsi_device *sdev,
4131 struct block_device *block_device,
4132 sector_t capacity, int *parm)
4133{
4134 int heads, sectors;
4135 sector_t cylinders;
4136
4137 heads = 128;
4138 sectors = 32;
4139
4140 cylinders = capacity;
4141 sector_div(cylinders, (128 * 32));
4142
4143 /* return result */
4144 parm[0] = heads;
4145 parm[1] = sectors;
4146 parm[2] = cylinders;
4147
4148 return 0;
4149}
4150
35a39691
BK
4151/**
4152 * ipr_find_starget - Find target based on bus/target.
4153 * @starget: scsi target struct
4154 *
4155 * Return value:
4156 * resource entry pointer if found / NULL if not found
4157 **/
4158static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4159{
4160 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4161 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4162 struct ipr_resource_entry *res;
4163
4164 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4165 if ((res->bus == starget->channel) &&
4166 (res->target == starget->id) &&
4167 (res->lun == 0)) {
35a39691
BK
4168 return res;
4169 }
4170 }
4171
4172 return NULL;
4173}
4174
4175static struct ata_port_info sata_port_info;
4176
4177/**
4178 * ipr_target_alloc - Prepare for commands to a SCSI target
4179 * @starget: scsi target struct
4180 *
4181 * If the device is a SATA device, this function allocates an
4182 * ATA port with libata, else it does nothing.
4183 *
4184 * Return value:
4185 * 0 on success / non-0 on failure
4186 **/
4187static int ipr_target_alloc(struct scsi_target *starget)
4188{
4189 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4190 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4191 struct ipr_sata_port *sata_port;
4192 struct ata_port *ap;
4193 struct ipr_resource_entry *res;
4194 unsigned long lock_flags;
4195
4196 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4197 res = ipr_find_starget(starget);
4198 starget->hostdata = NULL;
4199
4200 if (res && ipr_is_gata(res)) {
4201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4203 if (!sata_port)
4204 return -ENOMEM;
4205
4206 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4207 if (ap) {
4208 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4209 sata_port->ioa_cfg = ioa_cfg;
4210 sata_port->ap = ap;
4211 sata_port->res = res;
4212
4213 res->sata_port = sata_port;
4214 ap->private_data = sata_port;
4215 starget->hostdata = sata_port;
4216 } else {
4217 kfree(sata_port);
4218 return -ENOMEM;
4219 }
4220 }
4221 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4222
4223 return 0;
4224}
4225
4226/**
4227 * ipr_target_destroy - Destroy a SCSI target
4228 * @starget: scsi target struct
4229 *
4230 * If the device was a SATA device, this function frees the libata
4231 * ATA port, else it does nothing.
4232 *
4233 **/
4234static void ipr_target_destroy(struct scsi_target *starget)
4235{
4236 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4237 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4238 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4239
4240 if (ioa_cfg->sis64) {
4241 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4242 clear_bit(starget->id, ioa_cfg->array_ids);
4243 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4244 clear_bit(starget->id, ioa_cfg->vset_ids);
4245 else if (starget->channel == 0)
4246 clear_bit(starget->id, ioa_cfg->target_ids);
4247 }
35a39691
BK
4248
4249 if (sata_port) {
4250 starget->hostdata = NULL;
4251 ata_sas_port_destroy(sata_port->ap);
4252 kfree(sata_port);
4253 }
4254}
4255
4256/**
4257 * ipr_find_sdev - Find device based on bus/target/lun.
4258 * @sdev: scsi device struct
4259 *
4260 * Return value:
4261 * resource entry pointer if found / NULL if not found
4262 **/
4263static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4264{
4265 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4266 struct ipr_resource_entry *res;
4267
4268 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4269 if ((res->bus == sdev->channel) &&
4270 (res->target == sdev->id) &&
4271 (res->lun == sdev->lun))
35a39691
BK
4272 return res;
4273 }
4274
4275 return NULL;
4276}
4277
1da177e4
LT
4278/**
4279 * ipr_slave_destroy - Unconfigure a SCSI device
4280 * @sdev: scsi device struct
4281 *
4282 * Return value:
4283 * nothing
4284 **/
4285static void ipr_slave_destroy(struct scsi_device *sdev)
4286{
4287 struct ipr_resource_entry *res;
4288 struct ipr_ioa_cfg *ioa_cfg;
4289 unsigned long lock_flags = 0;
4290
4291 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4292
4293 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4294 res = (struct ipr_resource_entry *) sdev->hostdata;
4295 if (res) {
35a39691
BK
4296 if (res->sata_port)
4297 ata_port_disable(res->sata_port->ap);
1da177e4
LT
4298 sdev->hostdata = NULL;
4299 res->sdev = NULL;
35a39691 4300 res->sata_port = NULL;
1da177e4
LT
4301 }
4302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4303}
4304
4305/**
4306 * ipr_slave_configure - Configure a SCSI device
4307 * @sdev: scsi device struct
4308 *
4309 * This function configures the specified scsi device.
4310 *
4311 * Return value:
4312 * 0 on success
4313 **/
4314static int ipr_slave_configure(struct scsi_device *sdev)
4315{
4316 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4317 struct ipr_resource_entry *res;
dd406ef8 4318 struct ata_port *ap = NULL;
1da177e4 4319 unsigned long lock_flags = 0;
3e7ebdfa 4320 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4321
4322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4323 res = sdev->hostdata;
4324 if (res) {
4325 if (ipr_is_af_dasd_device(res))
4326 sdev->type = TYPE_RAID;
0726ce26 4327 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4328 sdev->scsi_level = 4;
0726ce26
BK
4329 sdev->no_uld_attach = 1;
4330 }
1da177e4 4331 if (ipr_is_vset_device(res)) {
242f9dcb
JA
4332 blk_queue_rq_timeout(sdev->request_queue,
4333 IPR_VSET_RW_TIMEOUT);
086fa5ff 4334 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4335 }
e4fbf44e 4336 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
1da177e4 4337 sdev->allow_restart = 1;
dd406ef8
BK
4338 if (ipr_is_gata(res) && res->sata_port)
4339 ap = res->sata_port->ap;
4340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4341
4342 if (ap) {
35a39691 4343 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8
BK
4344 ata_sas_slave_configure(sdev, ap);
4345 } else
35a39691 4346 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3e7ebdfa
WB
4347 if (ioa_cfg->sis64)
4348 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4349 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
dd406ef8 4350 return 0;
1da177e4
LT
4351 }
4352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4353 return 0;
4354}
4355
35a39691
BK
4356/**
4357 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4358 * @sdev: scsi device struct
4359 *
4360 * This function initializes an ATA port so that future commands
4361 * sent through queuecommand will work.
4362 *
4363 * Return value:
4364 * 0 on success
4365 **/
4366static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4367{
4368 struct ipr_sata_port *sata_port = NULL;
4369 int rc = -ENXIO;
4370
4371 ENTER;
4372 if (sdev->sdev_target)
4373 sata_port = sdev->sdev_target->hostdata;
4374 if (sata_port)
4375 rc = ata_sas_port_init(sata_port->ap);
4376 if (rc)
4377 ipr_slave_destroy(sdev);
4378
4379 LEAVE;
4380 return rc;
4381}
4382
1da177e4
LT
4383/**
4384 * ipr_slave_alloc - Prepare for commands to a device.
4385 * @sdev: scsi device struct
4386 *
4387 * This function saves a pointer to the resource entry
4388 * in the scsi device struct if the device exists. We
4389 * can then use this pointer in ipr_queuecommand when
4390 * handling new commands.
4391 *
4392 * Return value:
692aebfc 4393 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4394 **/
4395static int ipr_slave_alloc(struct scsi_device *sdev)
4396{
4397 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4398 struct ipr_resource_entry *res;
4399 unsigned long lock_flags;
692aebfc 4400 int rc = -ENXIO;
1da177e4
LT
4401
4402 sdev->hostdata = NULL;
4403
4404 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4405
35a39691
BK
4406 res = ipr_find_sdev(sdev);
4407 if (res) {
4408 res->sdev = sdev;
4409 res->add_to_ml = 0;
4410 res->in_erp = 0;
4411 sdev->hostdata = res;
4412 if (!ipr_is_naca_model(res))
4413 res->needs_sync_complete = 1;
4414 rc = 0;
4415 if (ipr_is_gata(res)) {
4416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
4418 }
4419 }
4420
4421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422
692aebfc 4423 return rc;
1da177e4
LT
4424}
4425
4426/**
4427 * ipr_eh_host_reset - Reset the host adapter
4428 * @scsi_cmd: scsi command struct
4429 *
4430 * Return value:
4431 * SUCCESS / FAILED
4432 **/
df0ae249 4433static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
4434{
4435 struct ipr_ioa_cfg *ioa_cfg;
4436 int rc;
4437
4438 ENTER;
4439 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4440
4441 dev_err(&ioa_cfg->pdev->dev,
4442 "Adapter being reset as a result of error recovery.\n");
4443
4444 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4445 ioa_cfg->sdt_state = GET_DUMP;
4446
4447 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4448
4449 LEAVE;
4450 return rc;
4451}
4452
df0ae249
JG
4453static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4454{
4455 int rc;
4456
4457 spin_lock_irq(cmd->device->host->host_lock);
4458 rc = __ipr_eh_host_reset(cmd);
4459 spin_unlock_irq(cmd->device->host->host_lock);
4460
4461 return rc;
4462}
4463
c6513096
BK
4464/**
4465 * ipr_device_reset - Reset the device
4466 * @ioa_cfg: ioa config struct
4467 * @res: resource entry struct
4468 *
4469 * This function issues a device reset to the affected device.
4470 * If the device is a SCSI device, a LUN reset will be sent
4471 * to the device first. If that does not work, a target reset
35a39691
BK
4472 * will be sent. If the device is a SATA device, a PHY reset will
4473 * be sent.
c6513096
BK
4474 *
4475 * Return value:
4476 * 0 on success / non-zero on failure
4477 **/
4478static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4479 struct ipr_resource_entry *res)
4480{
4481 struct ipr_cmnd *ipr_cmd;
4482 struct ipr_ioarcb *ioarcb;
4483 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4484 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
4485 u32 ioasc;
4486
4487 ENTER;
4488 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4489 ioarcb = &ipr_cmd->ioarcb;
4490 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
4491
4492 if (ipr_cmd->ioa_cfg->sis64) {
4493 regs = &ipr_cmd->i.ata_ioadl.regs;
4494 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4495 } else
4496 regs = &ioarcb->u.add_data.u.regs;
c6513096 4497
3e7ebdfa 4498 ioarcb->res_handle = res->res_handle;
c6513096
BK
4499 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4500 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
4501 if (ipr_is_gata(res)) {
4502 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4503 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
4504 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4505 }
c6513096
BK
4506
4507 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4508 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4509 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
35a39691
BK
4510 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
4511 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
4512 sizeof(struct ipr_ioasa_gata));
c6513096
BK
4513
4514 LEAVE;
4515 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4516}
4517
35a39691
BK
4518/**
4519 * ipr_sata_reset - Reset the SATA port
cc0680a5 4520 * @link: SATA link to reset
35a39691
BK
4521 * @classes: class of the attached device
4522 *
cc0680a5 4523 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
4524 *
4525 * Return value:
4526 * 0 on success / non-zero on failure
4527 **/
cc0680a5 4528static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 4529 unsigned long deadline)
35a39691 4530{
cc0680a5 4531 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
4532 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4533 struct ipr_resource_entry *res;
4534 unsigned long lock_flags = 0;
4535 int rc = -ENXIO;
4536
4537 ENTER;
4538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
73d98ff0
BK
4539 while(ioa_cfg->in_reset_reload) {
4540 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4541 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4542 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4543 }
4544
35a39691
BK
4545 res = sata_port->res;
4546 if (res) {
4547 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 4548 *classes = res->ata_class;
35a39691
BK
4549 }
4550
4551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4552 LEAVE;
4553 return rc;
4554}
4555
1da177e4
LT
4556/**
4557 * ipr_eh_dev_reset - Reset the device
4558 * @scsi_cmd: scsi command struct
4559 *
4560 * This function issues a device reset to the affected device.
4561 * A LUN reset will be sent to the device first. If that does
4562 * not work, a target reset will be sent.
4563 *
4564 * Return value:
4565 * SUCCESS / FAILED
4566 **/
94d0e7b8 4567static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
4568{
4569 struct ipr_cmnd *ipr_cmd;
4570 struct ipr_ioa_cfg *ioa_cfg;
4571 struct ipr_resource_entry *res;
35a39691
BK
4572 struct ata_port *ap;
4573 int rc = 0;
1da177e4
LT
4574
4575 ENTER;
4576 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4577 res = scsi_cmd->device->hostdata;
4578
eeb88307 4579 if (!res)
1da177e4
LT
4580 return FAILED;
4581
4582 /*
4583 * If we are currently going through reset/reload, return failed. This will force the
4584 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4585 * reset to complete
4586 */
4587 if (ioa_cfg->in_reset_reload)
4588 return FAILED;
4589 if (ioa_cfg->ioa_is_dead)
4590 return FAILED;
4591
4592 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4593 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
1da177e4
LT
4594 if (ipr_cmd->scsi_cmd)
4595 ipr_cmd->done = ipr_scsi_eh_done;
24d6f2b5
BK
4596 if (ipr_cmd->qc)
4597 ipr_cmd->done = ipr_sata_eh_done;
7402ecef
BK
4598 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4599 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4600 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4601 }
1da177e4
LT
4602 }
4603 }
4604
4605 res->resetting_device = 1;
fb3ed3cb 4606 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
4607
4608 if (ipr_is_gata(res) && res->sata_port) {
4609 ap = res->sata_port->ap;
4610 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 4611 ata_std_error_handler(ap);
35a39691 4612 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26
BK
4613
4614 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4615 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5af23d26
BK
4616 rc = -EIO;
4617 break;
4618 }
4619 }
35a39691
BK
4620 } else
4621 rc = ipr_device_reset(ioa_cfg, res);
1da177e4
LT
4622 res->resetting_device = 0;
4623
1da177e4 4624 LEAVE;
c6513096 4625 return (rc ? FAILED : SUCCESS);
1da177e4
LT
4626}
4627
94d0e7b8
JG
4628static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4629{
4630 int rc;
4631
4632 spin_lock_irq(cmd->device->host->host_lock);
4633 rc = __ipr_eh_dev_reset(cmd);
4634 spin_unlock_irq(cmd->device->host->host_lock);
4635
4636 return rc;
4637}
4638
1da177e4
LT
4639/**
4640 * ipr_bus_reset_done - Op done function for bus reset.
4641 * @ipr_cmd: ipr command struct
4642 *
4643 * This function is the op done function for a bus reset
4644 *
4645 * Return value:
4646 * none
4647 **/
4648static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4649{
4650 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4651 struct ipr_resource_entry *res;
4652
4653 ENTER;
3e7ebdfa
WB
4654 if (!ioa_cfg->sis64)
4655 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4656 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4657 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4658 break;
4659 }
1da177e4 4660 }
1da177e4
LT
4661
4662 /*
4663 * If abort has not completed, indicate the reset has, else call the
4664 * abort's done function to wake the sleeping eh thread
4665 */
4666 if (ipr_cmd->sibling->sibling)
4667 ipr_cmd->sibling->sibling = NULL;
4668 else
4669 ipr_cmd->sibling->done(ipr_cmd->sibling);
4670
4671 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4672 LEAVE;
4673}
4674
4675/**
4676 * ipr_abort_timeout - An abort task has timed out
4677 * @ipr_cmd: ipr command struct
4678 *
4679 * This function handles when an abort task times out. If this
4680 * happens we issue a bus reset since we have resources tied
4681 * up that must be freed before returning to the midlayer.
4682 *
4683 * Return value:
4684 * none
4685 **/
4686static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4687{
4688 struct ipr_cmnd *reset_cmd;
4689 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4690 struct ipr_cmd_pkt *cmd_pkt;
4691 unsigned long lock_flags = 0;
4692
4693 ENTER;
4694 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4695 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4697 return;
4698 }
4699
fb3ed3cb 4700 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
4701 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4702 ipr_cmd->sibling = reset_cmd;
4703 reset_cmd->sibling = ipr_cmd;
4704 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4705 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4706 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4707 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4708 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4709
4710 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4712 LEAVE;
4713}
4714
4715/**
4716 * ipr_cancel_op - Cancel specified op
4717 * @scsi_cmd: scsi command struct
4718 *
4719 * This function cancels specified op.
4720 *
4721 * Return value:
4722 * SUCCESS / FAILED
4723 **/
4724static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4725{
4726 struct ipr_cmnd *ipr_cmd;
4727 struct ipr_ioa_cfg *ioa_cfg;
4728 struct ipr_resource_entry *res;
4729 struct ipr_cmd_pkt *cmd_pkt;
4730 u32 ioasc;
4731 int op_found = 0;
4732
4733 ENTER;
4734 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4735 res = scsi_cmd->device->hostdata;
4736
8fa728a2
JG
4737 /* If we are currently going through reset/reload, return failed.
4738 * This will force the mid-layer to call ipr_eh_host_reset,
4739 * which will then go to sleep and wait for the reset to complete
4740 */
4741 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4742 return FAILED;
04d9768f 4743 if (!res || !ipr_is_gscsi(res))
1da177e4
LT
4744 return FAILED;
4745
4746 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4747 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4748 ipr_cmd->done = ipr_scsi_eh_done;
4749 op_found = 1;
4750 break;
4751 }
4752 }
4753
4754 if (!op_found)
4755 return SUCCESS;
4756
4757 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 4758 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
4759 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4760 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4761 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4762 ipr_cmd->u.sdev = scsi_cmd->device;
4763
fb3ed3cb
BK
4764 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4765 scsi_cmd->cmnd[0]);
1da177e4
LT
4766 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4767 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4768
4769 /*
4770 * If the abort task timed out and we sent a bus reset, we will get
4771 * one the following responses to the abort
4772 */
4773 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4774 ioasc = 0;
4775 ipr_trace;
4776 }
4777
4778 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ee0a90fa
BK
4779 if (!ipr_is_naca_model(res))
4780 res->needs_sync_complete = 1;
1da177e4
LT
4781
4782 LEAVE;
4783 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4784}
4785
4786/**
4787 * ipr_eh_abort - Abort a single op
4788 * @scsi_cmd: scsi command struct
4789 *
4790 * Return value:
4791 * SUCCESS / FAILED
4792 **/
4793static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4794{
8fa728a2
JG
4795 unsigned long flags;
4796 int rc;
1da177e4
LT
4797
4798 ENTER;
1da177e4 4799
8fa728a2
JG
4800 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4801 rc = ipr_cancel_op(scsi_cmd);
4802 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
4803
4804 LEAVE;
8fa728a2 4805 return rc;
1da177e4
LT
4806}
4807
4808/**
4809 * ipr_handle_other_interrupt - Handle "other" interrupts
4810 * @ioa_cfg: ioa config struct
4811 * @int_reg: interrupt register
4812 *
4813 * Return value:
4814 * IRQ_NONE / IRQ_HANDLED
4815 **/
4816static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4817 volatile u32 int_reg)
4818{
4819 irqreturn_t rc = IRQ_HANDLED;
4820
4821 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4822 /* Mask the interrupt */
4823 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4824
4825 /* Clear the interrupt */
4826 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4827 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4828
4829 list_del(&ioa_cfg->reset_cmd->queue);
4830 del_timer(&ioa_cfg->reset_cmd->timer);
4831 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4832 } else {
4833 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4834 ioa_cfg->ioa_unit_checked = 1;
4835 else
4836 dev_err(&ioa_cfg->pdev->dev,
4837 "Permanent IOA failure. 0x%08X\n", int_reg);
4838
4839 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4840 ioa_cfg->sdt_state = GET_DUMP;
4841
4842 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4843 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4844 }
4845
4846 return rc;
4847}
4848
3feeb89d
WB
4849/**
4850 * ipr_isr_eh - Interrupt service routine error handler
4851 * @ioa_cfg: ioa config struct
4852 * @msg: message to log
4853 *
4854 * Return value:
4855 * none
4856 **/
4857static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4858{
4859 ioa_cfg->errors_logged++;
4860 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4861
4862 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4863 ioa_cfg->sdt_state = GET_DUMP;
4864
4865 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4866}
4867
1da177e4
LT
4868/**
4869 * ipr_isr - Interrupt service routine
4870 * @irq: irq number
4871 * @devp: pointer to ioa config struct
1da177e4
LT
4872 *
4873 * Return value:
4874 * IRQ_NONE / IRQ_HANDLED
4875 **/
7d12e780 4876static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4
LT
4877{
4878 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4879 unsigned long lock_flags = 0;
4880 volatile u32 int_reg, int_mask_reg;
4881 u32 ioasc;
4882 u16 cmd_index;
3feeb89d 4883 int num_hrrq = 0;
1da177e4
LT
4884 struct ipr_cmnd *ipr_cmd;
4885 irqreturn_t rc = IRQ_NONE;
4886
4887 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4888
4889 /* If interrupts are disabled, ignore the interrupt */
4890 if (!ioa_cfg->allow_interrupts) {
4891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4892 return IRQ_NONE;
4893 }
4894
214777ba
WB
4895 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4896 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
1da177e4 4897
214777ba
WB
4898 /* If an interrupt on the adapter did not occur, ignore it.
4899 * Or in the case of SIS 64, check for a stage change interrupt.
4900 */
1da177e4 4901 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
214777ba
WB
4902 if (ioa_cfg->sis64) {
4903 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4904 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4905 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4906
4907 /* clear stage change */
4908 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4909 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4910 list_del(&ioa_cfg->reset_cmd->queue);
4911 del_timer(&ioa_cfg->reset_cmd->timer);
4912 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4913 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4914 return IRQ_HANDLED;
4915 }
4916 }
4917
1da177e4
LT
4918 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4919 return IRQ_NONE;
4920 }
4921
4922 while (1) {
4923 ipr_cmd = NULL;
4924
4925 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4926 ioa_cfg->toggle_bit) {
4927
4928 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4929 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4930
4931 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3feeb89d 4932 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
1da177e4
LT
4933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4934 return IRQ_HANDLED;
4935 }
4936
4937 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4938
4939 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4940
4941 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4942
4943 list_del(&ipr_cmd->queue);
4944 del_timer(&ipr_cmd->timer);
4945 ipr_cmd->done(ipr_cmd);
4946
4947 rc = IRQ_HANDLED;
4948
4949 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4950 ioa_cfg->hrrq_curr++;
4951 } else {
4952 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4953 ioa_cfg->toggle_bit ^= 1u;
4954 }
4955 }
4956
4957 if (ipr_cmd != NULL) {
4958 /* Clear the PCI interrupt */
3feeb89d 4959 do {
214777ba
WB
4960 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4961 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
3feeb89d
WB
4962 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4963 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4964
4965 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4966 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4968 return IRQ_HANDLED;
4969 }
4970
1da177e4
LT
4971 } else
4972 break;
4973 }
4974
4975 if (unlikely(rc == IRQ_NONE))
4976 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4977
4978 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4979 return rc;
4980}
4981
a32c055f
WB
4982/**
4983 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4984 * @ioa_cfg: ioa config struct
4985 * @ipr_cmd: ipr command struct
4986 *
4987 * Return value:
4988 * 0 on success / -1 on failure
4989 **/
4990static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4991 struct ipr_cmnd *ipr_cmd)
4992{
4993 int i, nseg;
4994 struct scatterlist *sg;
4995 u32 length;
4996 u32 ioadl_flags = 0;
4997 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4998 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4999 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5000
5001 length = scsi_bufflen(scsi_cmd);
5002 if (!length)
5003 return 0;
5004
5005 nseg = scsi_dma_map(scsi_cmd);
5006 if (nseg < 0) {
5007 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5008 return -1;
5009 }
5010
5011 ipr_cmd->dma_use_sg = nseg;
5012
5013 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5014 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5015 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5016 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5017 ioadl_flags = IPR_IOADL_FLAGS_READ;
5018
5019 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5020 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5021 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5022 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5023 }
5024
5025 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5026 return 0;
5027}
5028
1da177e4
LT
5029/**
5030 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5031 * @ioa_cfg: ioa config struct
5032 * @ipr_cmd: ipr command struct
5033 *
5034 * Return value:
5035 * 0 on success / -1 on failure
5036 **/
5037static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5038 struct ipr_cmnd *ipr_cmd)
5039{
63015bc9
FT
5040 int i, nseg;
5041 struct scatterlist *sg;
1da177e4
LT
5042 u32 length;
5043 u32 ioadl_flags = 0;
5044 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5045 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5046 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5047
63015bc9
FT
5048 length = scsi_bufflen(scsi_cmd);
5049 if (!length)
1da177e4
LT
5050 return 0;
5051
63015bc9
FT
5052 nseg = scsi_dma_map(scsi_cmd);
5053 if (nseg < 0) {
5054 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5055 return -1;
5056 }
51b1c7e1 5057
63015bc9
FT
5058 ipr_cmd->dma_use_sg = nseg;
5059
5060 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5061 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5062 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5063 ioarcb->data_transfer_length = cpu_to_be32(length);
5064 ioarcb->ioadl_len =
63015bc9
FT
5065 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5066 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5067 ioadl_flags = IPR_IOADL_FLAGS_READ;
5068 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5069 ioarcb->read_ioadl_len =
5070 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5071 }
1da177e4 5072
a32c055f
WB
5073 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5074 ioadl = ioarcb->u.add_data.u.ioadl;
5075 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5076 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5077 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5078 }
1da177e4 5079
63015bc9
FT
5080 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5081 ioadl[i].flags_and_data_len =
5082 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5083 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5084 }
5085
63015bc9
FT
5086 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5087 return 0;
1da177e4
LT
5088}
5089
5090/**
5091 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5092 * @scsi_cmd: scsi command struct
5093 *
5094 * Return value:
5095 * task attributes
5096 **/
5097static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5098{
5099 u8 tag[2];
5100 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5101
5102 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5103 switch (tag[0]) {
5104 case MSG_SIMPLE_TAG:
5105 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5106 break;
5107 case MSG_HEAD_TAG:
5108 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5109 break;
5110 case MSG_ORDERED_TAG:
5111 rc = IPR_FLAGS_LO_ORDERED_TASK;
5112 break;
5113 };
5114 }
5115
5116 return rc;
5117}
5118
5119/**
5120 * ipr_erp_done - Process completion of ERP for a device
5121 * @ipr_cmd: ipr command struct
5122 *
5123 * This function copies the sense buffer into the scsi_cmd
5124 * struct and pushes the scsi_done function.
5125 *
5126 * Return value:
5127 * nothing
5128 **/
5129static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5130{
5131 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5132 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5134 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5135
5136 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5137 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5138 scmd_printk(KERN_ERR, scsi_cmd,
5139 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5140 } else {
5141 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5142 SCSI_SENSE_BUFFERSIZE);
5143 }
5144
5145 if (res) {
ee0a90fa
BK
5146 if (!ipr_is_naca_model(res))
5147 res->needs_sync_complete = 1;
1da177e4
LT
5148 res->in_erp = 0;
5149 }
63015bc9 5150 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5151 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5152 scsi_cmd->scsi_done(scsi_cmd);
5153}
5154
5155/**
5156 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5157 * @ipr_cmd: ipr command struct
5158 *
5159 * Return value:
5160 * none
5161 **/
5162static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5163{
51b1c7e1
BK
5164 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5165 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
a32c055f 5166 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5167
5168 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5169 ioarcb->data_transfer_length = 0;
1da177e4 5170 ioarcb->read_data_transfer_length = 0;
a32c055f 5171 ioarcb->ioadl_len = 0;
1da177e4
LT
5172 ioarcb->read_ioadl_len = 0;
5173 ioasa->ioasc = 0;
5174 ioasa->residual_data_len = 0;
a32c055f
WB
5175
5176 if (ipr_cmd->ioa_cfg->sis64)
5177 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5178 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5179 else {
5180 ioarcb->write_ioadl_addr =
5181 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5182 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5183 }
1da177e4
LT
5184}
5185
5186/**
5187 * ipr_erp_request_sense - Send request sense to a device
5188 * @ipr_cmd: ipr command struct
5189 *
5190 * This function sends a request sense to a device as a result
5191 * of a check condition.
5192 *
5193 * Return value:
5194 * nothing
5195 **/
5196static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5197{
5198 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5199 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5200
5201 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5202 ipr_erp_done(ipr_cmd);
5203 return;
5204 }
5205
5206 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5207
5208 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5209 cmd_pkt->cdb[0] = REQUEST_SENSE;
5210 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5211 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5212 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5213 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5214
a32c055f
WB
5215 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5216 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5217
5218 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5219 IPR_REQUEST_SENSE_TIMEOUT * 2);
5220}
5221
5222/**
5223 * ipr_erp_cancel_all - Send cancel all to a device
5224 * @ipr_cmd: ipr command struct
5225 *
5226 * This function sends a cancel all to a device to clear the
5227 * queue. If we are running TCQ on the device, QERR is set to 1,
5228 * which means all outstanding ops have been dropped on the floor.
5229 * Cancel all will return them to us.
5230 *
5231 * Return value:
5232 * nothing
5233 **/
5234static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5235{
5236 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5237 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5238 struct ipr_cmd_pkt *cmd_pkt;
5239
5240 res->in_erp = 1;
5241
5242 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5243
5244 if (!scsi_get_tag_type(scsi_cmd->device)) {
5245 ipr_erp_request_sense(ipr_cmd);
5246 return;
5247 }
5248
5249 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5250 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5251 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5252
5253 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5254 IPR_CANCEL_ALL_TIMEOUT);
5255}
5256
5257/**
5258 * ipr_dump_ioasa - Dump contents of IOASA
5259 * @ioa_cfg: ioa config struct
5260 * @ipr_cmd: ipr command struct
fe964d0a 5261 * @res: resource entry struct
1da177e4
LT
5262 *
5263 * This function is invoked by the interrupt handler when ops
5264 * fail. It will log the IOASA if appropriate. Only called
5265 * for GPDD ops.
5266 *
5267 * Return value:
5268 * none
5269 **/
5270static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5271 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5272{
5273 int i;
5274 u16 data_len;
b0692dd4 5275 u32 ioasc, fd_ioasc;
1da177e4
LT
5276 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5277 __be32 *ioasa_data = (__be32 *)ioasa;
5278 int error_index;
5279
5280 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
b0692dd4 5281 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5282
5283 if (0 == ioasc)
5284 return;
5285
5286 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5287 return;
5288
b0692dd4
BK
5289 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5290 error_index = ipr_get_error(fd_ioasc);
5291 else
5292 error_index = ipr_get_error(ioasc);
1da177e4
LT
5293
5294 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5295 /* Don't log an error if the IOA already logged one */
5296 if (ioasa->ilid != 0)
5297 return;
5298
cc9bd5d4
BK
5299 if (!ipr_is_gscsi(res))
5300 return;
5301
1da177e4
LT
5302 if (ipr_error_table[error_index].log_ioasa == 0)
5303 return;
5304 }
5305
fe964d0a 5306 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
5307
5308 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
5309 data_len = sizeof(struct ipr_ioasa);
5310 else
5311 data_len = be16_to_cpu(ioasa->ret_stat_len);
5312
5313 ipr_err("IOASA Dump:\n");
5314
5315 for (i = 0; i < data_len / 4; i += 4) {
5316 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5317 be32_to_cpu(ioasa_data[i]),
5318 be32_to_cpu(ioasa_data[i+1]),
5319 be32_to_cpu(ioasa_data[i+2]),
5320 be32_to_cpu(ioasa_data[i+3]));
5321 }
5322}
5323
5324/**
5325 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5326 * @ioasa: IOASA
5327 * @sense_buf: sense data buffer
5328 *
5329 * Return value:
5330 * none
5331 **/
5332static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5333{
5334 u32 failing_lba;
5335 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5336 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5337 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5338 u32 ioasc = be32_to_cpu(ioasa->ioasc);
5339
5340 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5341
5342 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5343 return;
5344
5345 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5346
5347 if (ipr_is_vset_device(res) &&
5348 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5349 ioasa->u.vset.failing_lba_hi != 0) {
5350 sense_buf[0] = 0x72;
5351 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5352 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5353 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5354
5355 sense_buf[7] = 12;
5356 sense_buf[8] = 0;
5357 sense_buf[9] = 0x0A;
5358 sense_buf[10] = 0x80;
5359
5360 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5361
5362 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5363 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5364 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5365 sense_buf[15] = failing_lba & 0x000000ff;
5366
5367 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5368
5369 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5370 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5371 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5372 sense_buf[19] = failing_lba & 0x000000ff;
5373 } else {
5374 sense_buf[0] = 0x70;
5375 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5376 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5377 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5378
5379 /* Illegal request */
5380 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5381 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5382 sense_buf[7] = 10; /* additional length */
5383
5384 /* IOARCB was in error */
5385 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5386 sense_buf[15] = 0xC0;
5387 else /* Parameter data was invalid */
5388 sense_buf[15] = 0x80;
5389
5390 sense_buf[16] =
5391 ((IPR_FIELD_POINTER_MASK &
5392 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
5393 sense_buf[17] =
5394 (IPR_FIELD_POINTER_MASK &
5395 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
5396 } else {
5397 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5398 if (ipr_is_vset_device(res))
5399 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5400 else
5401 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5402
5403 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5404 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5405 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5406 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5407 sense_buf[6] = failing_lba & 0x000000ff;
5408 }
5409
5410 sense_buf[7] = 6; /* additional length */
5411 }
5412 }
5413}
5414
ee0a90fa
BK
5415/**
5416 * ipr_get_autosense - Copy autosense data to sense buffer
5417 * @ipr_cmd: ipr command struct
5418 *
5419 * This function copies the autosense buffer to the buffer
5420 * in the scsi_cmd, if there is autosense available.
5421 *
5422 * Return value:
5423 * 1 if autosense was available / 0 if not
5424 **/
5425static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5426{
5427 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5428
117d2ce1 5429 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
5430 return 0;
5431
5432 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5433 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5434 SCSI_SENSE_BUFFERSIZE));
5435 return 1;
5436}
5437
1da177e4
LT
5438/**
5439 * ipr_erp_start - Process an error response for a SCSI op
5440 * @ioa_cfg: ioa config struct
5441 * @ipr_cmd: ipr command struct
5442 *
5443 * This function determines whether or not to initiate ERP
5444 * on the affected device.
5445 *
5446 * Return value:
5447 * nothing
5448 **/
5449static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5450 struct ipr_cmnd *ipr_cmd)
5451{
5452 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5453 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5454 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
8a048994 5455 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5456
5457 if (!res) {
5458 ipr_scsi_eh_done(ipr_cmd);
5459 return;
5460 }
5461
8a048994 5462 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
5463 ipr_gen_sense(ipr_cmd);
5464
cc9bd5d4
BK
5465 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5466
8a048994 5467 switch (masked_ioasc) {
1da177e4 5468 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
5469 if (ipr_is_naca_model(res))
5470 scsi_cmd->result |= (DID_ABORT << 16);
5471 else
5472 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
5473 break;
5474 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 5475 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
5476 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5477 break;
5478 case IPR_IOASC_HW_SEL_TIMEOUT:
5479 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
5480 if (!ipr_is_naca_model(res))
5481 res->needs_sync_complete = 1;
1da177e4
LT
5482 break;
5483 case IPR_IOASC_SYNC_REQUIRED:
5484 if (!res->in_erp)
5485 res->needs_sync_complete = 1;
5486 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5487 break;
5488 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 5489 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
5490 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5491 break;
5492 case IPR_IOASC_BUS_WAS_RESET:
5493 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5494 /*
5495 * Report the bus reset and ask for a retry. The device
5496 * will give CC/UA the next command.
5497 */
5498 if (!res->resetting_device)
5499 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5500 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
5501 if (!ipr_is_naca_model(res))
5502 res->needs_sync_complete = 1;
1da177e4
LT
5503 break;
5504 case IPR_IOASC_HW_DEV_BUS_STATUS:
5505 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5506 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
5507 if (!ipr_get_autosense(ipr_cmd)) {
5508 if (!ipr_is_naca_model(res)) {
5509 ipr_erp_cancel_all(ipr_cmd);
5510 return;
5511 }
5512 }
1da177e4 5513 }
ee0a90fa
BK
5514 if (!ipr_is_naca_model(res))
5515 res->needs_sync_complete = 1;
1da177e4
LT
5516 break;
5517 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5518 break;
5519 default:
5b7304fb
BK
5520 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5521 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5522 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
5523 res->needs_sync_complete = 1;
5524 break;
5525 }
5526
63015bc9 5527 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5528 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5529 scsi_cmd->scsi_done(scsi_cmd);
5530}
5531
5532/**
5533 * ipr_scsi_done - mid-layer done function
5534 * @ipr_cmd: ipr command struct
5535 *
5536 * This function is invoked by the interrupt handler for
5537 * ops generated by the SCSI mid-layer
5538 *
5539 * Return value:
5540 * none
5541 **/
5542static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5543{
5544 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5545 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5546 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5547
63015bc9 5548 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
1da177e4
LT
5549
5550 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
63015bc9 5551 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5552 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5553 scsi_cmd->scsi_done(scsi_cmd);
5554 } else
5555 ipr_erp_start(ioa_cfg, ipr_cmd);
5556}
5557
1da177e4
LT
5558/**
5559 * ipr_queuecommand - Queue a mid-layer request
5560 * @scsi_cmd: scsi command struct
5561 * @done: done function
5562 *
5563 * This function queues a request generated by the mid-layer.
5564 *
5565 * Return value:
5566 * 0 on success
5567 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5568 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5569 **/
5570static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5571 void (*done) (struct scsi_cmnd *))
5572{
5573 struct ipr_ioa_cfg *ioa_cfg;
5574 struct ipr_resource_entry *res;
5575 struct ipr_ioarcb *ioarcb;
5576 struct ipr_cmnd *ipr_cmd;
5577 int rc = 0;
5578
5579 scsi_cmd->scsi_done = done;
5580 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5581 res = scsi_cmd->device->hostdata;
5582 scsi_cmd->result = (DID_OK << 16);
5583
5584 /*
5585 * We are currently blocking all devices due to a host reset
5586 * We have told the host to stop giving us new requests, but
5587 * ERP ops don't count. FIXME
5588 */
5589 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5590 return SCSI_MLQUEUE_HOST_BUSY;
5591
5592 /*
5593 * FIXME - Create scsi_set_host_offline interface
5594 * and the ioa_is_dead check can be removed
5595 */
5596 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5597 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5598 scsi_cmd->result = (DID_NO_CONNECT << 16);
5599 scsi_cmd->scsi_done(scsi_cmd);
5600 return 0;
5601 }
5602
35a39691
BK
5603 if (ipr_is_gata(res) && res->sata_port)
5604 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5605
1da177e4
LT
5606 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5607 ioarcb = &ipr_cmd->ioarcb;
5608 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5609
5610 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5611 ipr_cmd->scsi_cmd = scsi_cmd;
3e7ebdfa 5612 ioarcb->res_handle = res->res_handle;
1da177e4 5613 ipr_cmd->done = ipr_scsi_done;
3e7ebdfa 5614 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
1da177e4
LT
5615
5616 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5617 if (scsi_cmd->underflow == 0)
5618 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5619
5620 if (res->needs_sync_complete) {
5621 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5622 res->needs_sync_complete = 0;
5623 }
5624
5625 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5626 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5627 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5628 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5629 }
5630
5631 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5632 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5633 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5634
a32c055f
WB
5635 if (likely(rc == 0)) {
5636 if (ioa_cfg->sis64)
5637 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5638 else
5639 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5640 }
1da177e4
LT
5641
5642 if (likely(rc == 0)) {
5643 mb();
a32c055f 5644 ipr_send_command(ipr_cmd);
1da177e4
LT
5645 } else {
5646 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5647 return SCSI_MLQUEUE_HOST_BUSY;
5648 }
5649
5650 return 0;
5651}
5652
35a39691
BK
5653/**
5654 * ipr_ioctl - IOCTL handler
5655 * @sdev: scsi device struct
5656 * @cmd: IOCTL cmd
5657 * @arg: IOCTL arg
5658 *
5659 * Return value:
5660 * 0 on success / other on failure
5661 **/
bd705f2d 5662static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
5663{
5664 struct ipr_resource_entry *res;
5665
5666 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
5667 if (res && ipr_is_gata(res)) {
5668 if (cmd == HDIO_GET_IDENTITY)
5669 return -ENOTTY;
94be9a58 5670 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 5671 }
35a39691
BK
5672
5673 return -EINVAL;
5674}
5675
1da177e4
LT
5676/**
5677 * ipr_info - Get information about the card/driver
5678 * @scsi_host: scsi host struct
5679 *
5680 * Return value:
5681 * pointer to buffer with description string
5682 **/
5683static const char * ipr_ioa_info(struct Scsi_Host *host)
5684{
5685 static char buffer[512];
5686 struct ipr_ioa_cfg *ioa_cfg;
5687 unsigned long lock_flags = 0;
5688
5689 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5690
5691 spin_lock_irqsave(host->host_lock, lock_flags);
5692 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5693 spin_unlock_irqrestore(host->host_lock, lock_flags);
5694
5695 return buffer;
5696}
5697
5698static struct scsi_host_template driver_template = {
5699 .module = THIS_MODULE,
5700 .name = "IPR",
5701 .info = ipr_ioa_info,
35a39691 5702 .ioctl = ipr_ioctl,
1da177e4
LT
5703 .queuecommand = ipr_queuecommand,
5704 .eh_abort_handler = ipr_eh_abort,
5705 .eh_device_reset_handler = ipr_eh_dev_reset,
5706 .eh_host_reset_handler = ipr_eh_host_reset,
5707 .slave_alloc = ipr_slave_alloc,
5708 .slave_configure = ipr_slave_configure,
5709 .slave_destroy = ipr_slave_destroy,
35a39691
BK
5710 .target_alloc = ipr_target_alloc,
5711 .target_destroy = ipr_target_destroy,
1da177e4
LT
5712 .change_queue_depth = ipr_change_queue_depth,
5713 .change_queue_type = ipr_change_queue_type,
5714 .bios_param = ipr_biosparam,
5715 .can_queue = IPR_MAX_COMMANDS,
5716 .this_id = -1,
5717 .sg_tablesize = IPR_MAX_SGLIST,
5718 .max_sectors = IPR_IOA_MAX_SECTORS,
5719 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5720 .use_clustering = ENABLE_CLUSTERING,
5721 .shost_attrs = ipr_ioa_attrs,
5722 .sdev_attrs = ipr_dev_attrs,
5723 .proc_name = IPR_NAME
5724};
5725
35a39691
BK
5726/**
5727 * ipr_ata_phy_reset - libata phy_reset handler
5728 * @ap: ata port to reset
5729 *
5730 **/
5731static void ipr_ata_phy_reset(struct ata_port *ap)
5732{
5733 unsigned long flags;
5734 struct ipr_sata_port *sata_port = ap->private_data;
5735 struct ipr_resource_entry *res = sata_port->res;
5736 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5737 int rc;
5738
5739 ENTER;
5740 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5741 while(ioa_cfg->in_reset_reload) {
5742 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5743 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5744 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5745 }
5746
5747 if (!ioa_cfg->allow_cmds)
5748 goto out_unlock;
5749
5750 rc = ipr_device_reset(ioa_cfg, res);
5751
5752 if (rc) {
ac8869d5 5753 ata_port_disable(ap);
35a39691
BK
5754 goto out_unlock;
5755 }
5756
3e7ebdfa
WB
5757 ap->link.device[0].class = res->ata_class;
5758 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
ac8869d5 5759 ata_port_disable(ap);
35a39691
BK
5760
5761out_unlock:
5762 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5763 LEAVE;
5764}
5765
5766/**
5767 * ipr_ata_post_internal - Cleanup after an internal command
5768 * @qc: ATA queued command
5769 *
5770 * Return value:
5771 * none
5772 **/
5773static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5774{
5775 struct ipr_sata_port *sata_port = qc->ap->private_data;
5776 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5777 struct ipr_cmnd *ipr_cmd;
5778 unsigned long flags;
5779
5780 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
73d98ff0
BK
5781 while(ioa_cfg->in_reset_reload) {
5782 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5783 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5784 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5785 }
5786
35a39691
BK
5787 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5788 if (ipr_cmd->qc == qc) {
5789 ipr_device_reset(ioa_cfg, sata_port->res);
5790 break;
5791 }
5792 }
5793 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5794}
5795
35a39691
BK
5796/**
5797 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5798 * @regs: destination
5799 * @tf: source ATA taskfile
5800 *
5801 * Return value:
5802 * none
5803 **/
5804static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5805 struct ata_taskfile *tf)
5806{
5807 regs->feature = tf->feature;
5808 regs->nsect = tf->nsect;
5809 regs->lbal = tf->lbal;
5810 regs->lbam = tf->lbam;
5811 regs->lbah = tf->lbah;
5812 regs->device = tf->device;
5813 regs->command = tf->command;
5814 regs->hob_feature = tf->hob_feature;
5815 regs->hob_nsect = tf->hob_nsect;
5816 regs->hob_lbal = tf->hob_lbal;
5817 regs->hob_lbam = tf->hob_lbam;
5818 regs->hob_lbah = tf->hob_lbah;
5819 regs->ctl = tf->ctl;
5820}
5821
5822/**
5823 * ipr_sata_done - done function for SATA commands
5824 * @ipr_cmd: ipr command struct
5825 *
5826 * This function is invoked by the interrupt handler for
5827 * ops generated by the SCSI mid-layer to SATA devices
5828 *
5829 * Return value:
5830 * none
5831 **/
5832static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5833{
5834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5835 struct ata_queued_cmd *qc = ipr_cmd->qc;
5836 struct ipr_sata_port *sata_port = qc->ap->private_data;
5837 struct ipr_resource_entry *res = sata_port->res;
5838 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5839
5840 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5841 sizeof(struct ipr_ioasa_gata));
5842 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5843
5844 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 5845 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
5846
5847 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5848 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5849 else
5850 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5851 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5852 ata_qc_complete(qc);
5853}
5854
a32c055f
WB
5855/**
5856 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5857 * @ipr_cmd: ipr command struct
5858 * @qc: ATA queued command
5859 *
5860 **/
5861static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5862 struct ata_queued_cmd *qc)
5863{
5864 u32 ioadl_flags = 0;
5865 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5866 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5867 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5868 int len = qc->nbytes;
5869 struct scatterlist *sg;
5870 unsigned int si;
5871 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5872
5873 if (len == 0)
5874 return;
5875
5876 if (qc->dma_dir == DMA_TO_DEVICE) {
5877 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5878 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5879 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5880 ioadl_flags = IPR_IOADL_FLAGS_READ;
5881
5882 ioarcb->data_transfer_length = cpu_to_be32(len);
5883 ioarcb->ioadl_len =
5884 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5885 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5886 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5887
5888 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5889 ioadl64->flags = cpu_to_be32(ioadl_flags);
5890 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5891 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5892
5893 last_ioadl64 = ioadl64;
5894 ioadl64++;
5895 }
5896
5897 if (likely(last_ioadl64))
5898 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5899}
5900
35a39691
BK
5901/**
5902 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5903 * @ipr_cmd: ipr command struct
5904 * @qc: ATA queued command
5905 *
5906 **/
5907static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5908 struct ata_queued_cmd *qc)
5909{
5910 u32 ioadl_flags = 0;
5911 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5912 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 5913 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 5914 int len = qc->nbytes;
35a39691 5915 struct scatterlist *sg;
ff2aeb1e 5916 unsigned int si;
35a39691
BK
5917
5918 if (len == 0)
5919 return;
5920
5921 if (qc->dma_dir == DMA_TO_DEVICE) {
5922 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5923 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5924 ioarcb->data_transfer_length = cpu_to_be32(len);
5925 ioarcb->ioadl_len =
35a39691
BK
5926 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5927 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5928 ioadl_flags = IPR_IOADL_FLAGS_READ;
5929 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5930 ioarcb->read_ioadl_len =
5931 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5932 }
5933
ff2aeb1e 5934 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
5935 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5936 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
5937
5938 last_ioadl = ioadl;
5939 ioadl++;
35a39691 5940 }
3be6cbd7
JG
5941
5942 if (likely(last_ioadl))
5943 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
5944}
5945
5946/**
5947 * ipr_qc_issue - Issue a SATA qc to a device
5948 * @qc: queued command
5949 *
5950 * Return value:
5951 * 0 if success
5952 **/
5953static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5954{
5955 struct ata_port *ap = qc->ap;
5956 struct ipr_sata_port *sata_port = ap->private_data;
5957 struct ipr_resource_entry *res = sata_port->res;
5958 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5959 struct ipr_cmnd *ipr_cmd;
5960 struct ipr_ioarcb *ioarcb;
5961 struct ipr_ioarcb_ata_regs *regs;
5962
5963 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
0feeed82 5964 return AC_ERR_SYSTEM;
35a39691
BK
5965
5966 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5967 ioarcb = &ipr_cmd->ioarcb;
35a39691 5968
a32c055f
WB
5969 if (ioa_cfg->sis64) {
5970 regs = &ipr_cmd->i.ata_ioadl.regs;
5971 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5972 } else
5973 regs = &ioarcb->u.add_data.u.regs;
5974
5975 memset(regs, 0, sizeof(*regs));
5976 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691
BK
5977
5978 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5979 ipr_cmd->qc = qc;
5980 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 5981 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
5982 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5983 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5984 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 5985 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 5986
a32c055f
WB
5987 if (ioa_cfg->sis64)
5988 ipr_build_ata_ioadl64(ipr_cmd, qc);
5989 else
5990 ipr_build_ata_ioadl(ipr_cmd, qc);
5991
35a39691
BK
5992 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5993 ipr_copy_sata_tf(regs, &qc->tf);
5994 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 5995 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
5996
5997 switch (qc->tf.protocol) {
5998 case ATA_PROT_NODATA:
5999 case ATA_PROT_PIO:
6000 break;
6001
6002 case ATA_PROT_DMA:
6003 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6004 break;
6005
0dc36888
TH
6006 case ATAPI_PROT_PIO:
6007 case ATAPI_PROT_NODATA:
35a39691
BK
6008 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6009 break;
6010
0dc36888 6011 case ATAPI_PROT_DMA:
35a39691
BK
6012 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6013 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6014 break;
6015
6016 default:
6017 WARN_ON(1);
0feeed82 6018 return AC_ERR_INVALID;
35a39691
BK
6019 }
6020
6021 mb();
a32c055f
WB
6022
6023 ipr_send_command(ipr_cmd);
6024
35a39691
BK
6025 return 0;
6026}
6027
4c9bf4e7
TH
6028/**
6029 * ipr_qc_fill_rtf - Read result TF
6030 * @qc: ATA queued command
6031 *
6032 * Return value:
6033 * true
6034 **/
6035static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6036{
6037 struct ipr_sata_port *sata_port = qc->ap->private_data;
6038 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6039 struct ata_taskfile *tf = &qc->result_tf;
6040
6041 tf->feature = g->error;
6042 tf->nsect = g->nsect;
6043 tf->lbal = g->lbal;
6044 tf->lbam = g->lbam;
6045 tf->lbah = g->lbah;
6046 tf->device = g->device;
6047 tf->command = g->status;
6048 tf->hob_nsect = g->hob_nsect;
6049 tf->hob_lbal = g->hob_lbal;
6050 tf->hob_lbam = g->hob_lbam;
6051 tf->hob_lbah = g->hob_lbah;
6052 tf->ctl = g->alt_status;
6053
6054 return true;
6055}
6056
35a39691 6057static struct ata_port_operations ipr_sata_ops = {
35a39691 6058 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6059 .hardreset = ipr_sata_reset,
35a39691 6060 .post_internal_cmd = ipr_ata_post_internal,
35a39691
BK
6061 .qc_prep = ata_noop_qc_prep,
6062 .qc_issue = ipr_qc_issue,
4c9bf4e7 6063 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6064 .port_start = ata_sas_port_start,
6065 .port_stop = ata_sas_port_stop
6066};
6067
6068static struct ata_port_info sata_port_info = {
6069 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6070 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6071 .pio_mask = 0x10, /* pio4 */
6072 .mwdma_mask = 0x07,
6073 .udma_mask = 0x7f, /* udma0-6 */
6074 .port_ops = &ipr_sata_ops
6075};
6076
1da177e4
LT
6077#ifdef CONFIG_PPC_PSERIES
6078static const u16 ipr_blocked_processors[] = {
6079 PV_NORTHSTAR,
6080 PV_PULSAR,
6081 PV_POWER4,
6082 PV_ICESTAR,
6083 PV_SSTAR,
6084 PV_POWER4p,
6085 PV_630,
6086 PV_630p
6087};
6088
6089/**
6090 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6091 * @ioa_cfg: ioa cfg struct
6092 *
6093 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6094 * certain pSeries hardware. This function determines if the given
6095 * adapter is in one of these confgurations or not.
6096 *
6097 * Return value:
6098 * 1 if adapter is not supported / 0 if adapter is supported
6099 **/
6100static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6101{
1da177e4
LT
6102 int i;
6103
44c10138
AK
6104 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6105 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6106 if (__is_processor(ipr_blocked_processors[i]))
6107 return 1;
1da177e4
LT
6108 }
6109 }
6110 return 0;
6111}
6112#else
6113#define ipr_invalid_adapter(ioa_cfg) 0
6114#endif
6115
6116/**
6117 * ipr_ioa_bringdown_done - IOA bring down completion.
6118 * @ipr_cmd: ipr command struct
6119 *
6120 * This function processes the completion of an adapter bring down.
6121 * It wakes any reset sleepers.
6122 *
6123 * Return value:
6124 * IPR_RC_JOB_RETURN
6125 **/
6126static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6127{
6128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6129
6130 ENTER;
6131 ioa_cfg->in_reset_reload = 0;
6132 ioa_cfg->reset_retries = 0;
6133 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6134 wake_up_all(&ioa_cfg->reset_wait_q);
6135
6136 spin_unlock_irq(ioa_cfg->host->host_lock);
6137 scsi_unblock_requests(ioa_cfg->host);
6138 spin_lock_irq(ioa_cfg->host->host_lock);
6139 LEAVE;
6140
6141 return IPR_RC_JOB_RETURN;
6142}
6143
6144/**
6145 * ipr_ioa_reset_done - IOA reset completion.
6146 * @ipr_cmd: ipr command struct
6147 *
6148 * This function processes the completion of an adapter reset.
6149 * It schedules any necessary mid-layer add/removes and
6150 * wakes any reset sleepers.
6151 *
6152 * Return value:
6153 * IPR_RC_JOB_RETURN
6154 **/
6155static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6156{
6157 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6158 struct ipr_resource_entry *res;
6159 struct ipr_hostrcb *hostrcb, *temp;
6160 int i = 0;
6161
6162 ENTER;
6163 ioa_cfg->in_reset_reload = 0;
6164 ioa_cfg->allow_cmds = 1;
6165 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6166 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6167
6168 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6169 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6170 ipr_trace;
6171 break;
6172 }
6173 }
6174 schedule_work(&ioa_cfg->work_q);
6175
6176 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6177 list_del(&hostrcb->queue);
6178 if (i++ < IPR_NUM_LOG_HCAMS)
6179 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6180 else
6181 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6182 }
6183
6bb04170 6184 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6185 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6186
6187 ioa_cfg->reset_retries = 0;
6188 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6189 wake_up_all(&ioa_cfg->reset_wait_q);
6190
30237853 6191 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6192 scsi_unblock_requests(ioa_cfg->host);
30237853 6193 spin_lock(ioa_cfg->host->host_lock);
1da177e4
LT
6194
6195 if (!ioa_cfg->allow_cmds)
6196 scsi_block_requests(ioa_cfg->host);
6197
6198 LEAVE;
6199 return IPR_RC_JOB_RETURN;
6200}
6201
6202/**
6203 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6204 * @supported_dev: supported device struct
6205 * @vpids: vendor product id struct
6206 *
6207 * Return value:
6208 * none
6209 **/
6210static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6211 struct ipr_std_inq_vpids *vpids)
6212{
6213 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6214 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6215 supported_dev->num_records = 1;
6216 supported_dev->data_length =
6217 cpu_to_be16(sizeof(struct ipr_supported_device));
6218 supported_dev->reserved = 0;
6219}
6220
6221/**
6222 * ipr_set_supported_devs - Send Set Supported Devices for a device
6223 * @ipr_cmd: ipr command struct
6224 *
a32c055f 6225 * This function sends a Set Supported Devices to the adapter
1da177e4
LT
6226 *
6227 * Return value:
6228 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6229 **/
6230static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6231{
6232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6233 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6234 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6235 struct ipr_resource_entry *res = ipr_cmd->u.res;
6236
6237 ipr_cmd->job_step = ipr_ioa_reset_done;
6238
6239 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6240 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6241 continue;
6242
6243 ipr_cmd->u.res = res;
3e7ebdfa 6244 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6245
6246 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6247 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6248 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6249
6250 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6251 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6252 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6253 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6254
a32c055f
WB
6255 ipr_init_ioadl(ipr_cmd,
6256 ioa_cfg->vpd_cbs_dma +
6257 offsetof(struct ipr_misc_cbs, supp_dev),
6258 sizeof(struct ipr_supported_device),
6259 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6260
6261 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6262 IPR_SET_SUP_DEVICE_TIMEOUT);
6263
3e7ebdfa
WB
6264 if (!ioa_cfg->sis64)
6265 ipr_cmd->job_step = ipr_set_supported_devs;
1da177e4
LT
6266 return IPR_RC_JOB_RETURN;
6267 }
6268
6269 return IPR_RC_JOB_CONTINUE;
6270}
6271
6272/**
6273 * ipr_get_mode_page - Locate specified mode page
6274 * @mode_pages: mode page buffer
6275 * @page_code: page code to find
6276 * @len: minimum required length for mode page
6277 *
6278 * Return value:
6279 * pointer to mode page / NULL on failure
6280 **/
6281static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6282 u32 page_code, u32 len)
6283{
6284 struct ipr_mode_page_hdr *mode_hdr;
6285 u32 page_length;
6286 u32 length;
6287
6288 if (!mode_pages || (mode_pages->hdr.length == 0))
6289 return NULL;
6290
6291 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6292 mode_hdr = (struct ipr_mode_page_hdr *)
6293 (mode_pages->data + mode_pages->hdr.block_desc_len);
6294
6295 while (length) {
6296 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6297 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6298 return mode_hdr;
6299 break;
6300 } else {
6301 page_length = (sizeof(struct ipr_mode_page_hdr) +
6302 mode_hdr->page_length);
6303 length -= page_length;
6304 mode_hdr = (struct ipr_mode_page_hdr *)
6305 ((unsigned long)mode_hdr + page_length);
6306 }
6307 }
6308 return NULL;
6309}
6310
6311/**
6312 * ipr_check_term_power - Check for term power errors
6313 * @ioa_cfg: ioa config struct
6314 * @mode_pages: IOAFP mode pages buffer
6315 *
6316 * Check the IOAFP's mode page 28 for term power errors
6317 *
6318 * Return value:
6319 * nothing
6320 **/
6321static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6322 struct ipr_mode_pages *mode_pages)
6323{
6324 int i;
6325 int entry_length;
6326 struct ipr_dev_bus_entry *bus;
6327 struct ipr_mode_page28 *mode_page;
6328
6329 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6330 sizeof(struct ipr_mode_page28));
6331
6332 entry_length = mode_page->entry_length;
6333
6334 bus = mode_page->bus;
6335
6336 for (i = 0; i < mode_page->num_entries; i++) {
6337 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6338 dev_err(&ioa_cfg->pdev->dev,
6339 "Term power is absent on scsi bus %d\n",
6340 bus->res_addr.bus);
6341 }
6342
6343 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6344 }
6345}
6346
6347/**
6348 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6349 * @ioa_cfg: ioa config struct
6350 *
6351 * Looks through the config table checking for SES devices. If
6352 * the SES device is in the SES table indicating a maximum SCSI
6353 * bus speed, the speed is limited for the bus.
6354 *
6355 * Return value:
6356 * none
6357 **/
6358static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6359{
6360 u32 max_xfer_rate;
6361 int i;
6362
6363 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6364 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6365 ioa_cfg->bus_attr[i].bus_width);
6366
6367 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6368 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6369 }
6370}
6371
6372/**
6373 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6374 * @ioa_cfg: ioa config struct
6375 * @mode_pages: mode page 28 buffer
6376 *
6377 * Updates mode page 28 based on driver configuration
6378 *
6379 * Return value:
6380 * none
6381 **/
6382static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6383 struct ipr_mode_pages *mode_pages)
6384{
6385 int i, entry_length;
6386 struct ipr_dev_bus_entry *bus;
6387 struct ipr_bus_attributes *bus_attr;
6388 struct ipr_mode_page28 *mode_page;
6389
6390 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6391 sizeof(struct ipr_mode_page28));
6392
6393 entry_length = mode_page->entry_length;
6394
6395 /* Loop for each device bus entry */
6396 for (i = 0, bus = mode_page->bus;
6397 i < mode_page->num_entries;
6398 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6399 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6400 dev_err(&ioa_cfg->pdev->dev,
6401 "Invalid resource address reported: 0x%08X\n",
6402 IPR_GET_PHYS_LOC(bus->res_addr));
6403 continue;
6404 }
6405
6406 bus_attr = &ioa_cfg->bus_attr[i];
6407 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6408 bus->bus_width = bus_attr->bus_width;
6409 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6410 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6411 if (bus_attr->qas_enabled)
6412 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6413 else
6414 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6415 }
6416}
6417
6418/**
6419 * ipr_build_mode_select - Build a mode select command
6420 * @ipr_cmd: ipr command struct
6421 * @res_handle: resource handle to send command to
6422 * @parm: Byte 2 of Mode Sense command
6423 * @dma_addr: DMA buffer address
6424 * @xfer_len: data transfer length
6425 *
6426 * Return value:
6427 * none
6428 **/
6429static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
6430 __be32 res_handle, u8 parm,
6431 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6432{
1da177e4
LT
6433 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6434
6435 ioarcb->res_handle = res_handle;
6436 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6437 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6438 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6439 ioarcb->cmd_pkt.cdb[1] = parm;
6440 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6441
a32c055f 6442 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6443}
6444
6445/**
6446 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6447 * @ipr_cmd: ipr command struct
6448 *
6449 * This function sets up the SCSI bus attributes and sends
6450 * a Mode Select for Page 28 to activate them.
6451 *
6452 * Return value:
6453 * IPR_RC_JOB_RETURN
6454 **/
6455static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6456{
6457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6458 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6459 int length;
6460
6461 ENTER;
4733804c
BK
6462 ipr_scsi_bus_speed_limit(ioa_cfg);
6463 ipr_check_term_power(ioa_cfg, mode_pages);
6464 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6465 length = mode_pages->hdr.length + 1;
6466 mode_pages->hdr.length = 0;
1da177e4
LT
6467
6468 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6469 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6470 length);
6471
f72919ec
WB
6472 ipr_cmd->job_step = ipr_set_supported_devs;
6473 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6474 struct ipr_resource_entry, queue);
1da177e4
LT
6475 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6476
6477 LEAVE;
6478 return IPR_RC_JOB_RETURN;
6479}
6480
6481/**
6482 * ipr_build_mode_sense - Builds a mode sense command
6483 * @ipr_cmd: ipr command struct
6484 * @res: resource entry struct
6485 * @parm: Byte 2 of mode sense command
6486 * @dma_addr: DMA address of mode sense buffer
6487 * @xfer_len: Size of DMA buffer
6488 *
6489 * Return value:
6490 * none
6491 **/
6492static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6493 __be32 res_handle,
a32c055f 6494 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6495{
1da177e4
LT
6496 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6497
6498 ioarcb->res_handle = res_handle;
6499 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6500 ioarcb->cmd_pkt.cdb[2] = parm;
6501 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6502 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6503
a32c055f 6504 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6505}
6506
dfed823e
BK
6507/**
6508 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6509 * @ipr_cmd: ipr command struct
6510 *
6511 * This function handles the failure of an IOA bringup command.
6512 *
6513 * Return value:
6514 * IPR_RC_JOB_RETURN
6515 **/
6516static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6517{
6518 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6519 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6520
6521 dev_err(&ioa_cfg->pdev->dev,
6522 "0x%02X failed with IOASC: 0x%08X\n",
6523 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6524
6525 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6526 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6527 return IPR_RC_JOB_RETURN;
6528}
6529
6530/**
6531 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6532 * @ipr_cmd: ipr command struct
6533 *
6534 * This function handles the failure of a Mode Sense to the IOAFP.
6535 * Some adapters do not handle all mode pages.
6536 *
6537 * Return value:
6538 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6539 **/
6540static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6541{
f72919ec 6542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
dfed823e
BK
6543 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6544
6545 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
6546 ipr_cmd->job_step = ipr_set_supported_devs;
6547 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6548 struct ipr_resource_entry, queue);
dfed823e
BK
6549 return IPR_RC_JOB_CONTINUE;
6550 }
6551
6552 return ipr_reset_cmd_failed(ipr_cmd);
6553}
6554
1da177e4
LT
6555/**
6556 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6557 * @ipr_cmd: ipr command struct
6558 *
6559 * This function send a Page 28 mode sense to the IOA to
6560 * retrieve SCSI bus attributes.
6561 *
6562 * Return value:
6563 * IPR_RC_JOB_RETURN
6564 **/
6565static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6566{
6567 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6568
6569 ENTER;
6570 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6571 0x28, ioa_cfg->vpd_cbs_dma +
6572 offsetof(struct ipr_misc_cbs, mode_pages),
6573 sizeof(struct ipr_mode_pages));
6574
6575 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 6576 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
6577
6578 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6579
6580 LEAVE;
6581 return IPR_RC_JOB_RETURN;
6582}
6583
ac09c349
BK
6584/**
6585 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6586 * @ipr_cmd: ipr command struct
6587 *
6588 * This function enables dual IOA RAID support if possible.
6589 *
6590 * Return value:
6591 * IPR_RC_JOB_RETURN
6592 **/
6593static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6594{
6595 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6596 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6597 struct ipr_mode_page24 *mode_page;
6598 int length;
6599
6600 ENTER;
6601 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6602 sizeof(struct ipr_mode_page24));
6603
6604 if (mode_page)
6605 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6606
6607 length = mode_pages->hdr.length + 1;
6608 mode_pages->hdr.length = 0;
6609
6610 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6611 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6612 length);
6613
6614 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6615 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6616
6617 LEAVE;
6618 return IPR_RC_JOB_RETURN;
6619}
6620
6621/**
6622 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6623 * @ipr_cmd: ipr command struct
6624 *
6625 * This function handles the failure of a Mode Sense to the IOAFP.
6626 * Some adapters do not handle all mode pages.
6627 *
6628 * Return value:
6629 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6630 **/
6631static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6632{
6633 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6634
6635 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6636 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6637 return IPR_RC_JOB_CONTINUE;
6638 }
6639
6640 return ipr_reset_cmd_failed(ipr_cmd);
6641}
6642
6643/**
6644 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6645 * @ipr_cmd: ipr command struct
6646 *
6647 * This function send a mode sense to the IOA to retrieve
6648 * the IOA Advanced Function Control mode page.
6649 *
6650 * Return value:
6651 * IPR_RC_JOB_RETURN
6652 **/
6653static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6654{
6655 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6656
6657 ENTER;
6658 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6659 0x24, ioa_cfg->vpd_cbs_dma +
6660 offsetof(struct ipr_misc_cbs, mode_pages),
6661 sizeof(struct ipr_mode_pages));
6662
6663 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6664 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6665
6666 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6667
6668 LEAVE;
6669 return IPR_RC_JOB_RETURN;
6670}
6671
1da177e4
LT
6672/**
6673 * ipr_init_res_table - Initialize the resource table
6674 * @ipr_cmd: ipr command struct
6675 *
6676 * This function looks through the existing resource table, comparing
6677 * it with the config table. This function will take care of old/new
6678 * devices and schedule adding/removing them from the mid-layer
6679 * as appropriate.
6680 *
6681 * Return value:
6682 * IPR_RC_JOB_CONTINUE
6683 **/
6684static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6685{
6686 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6687 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
6688 struct ipr_config_table_entry_wrapper cfgtew;
6689 int entries, found, flag, i;
1da177e4
LT
6690 LIST_HEAD(old_res);
6691
6692 ENTER;
3e7ebdfa
WB
6693 if (ioa_cfg->sis64)
6694 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6695 else
6696 flag = ioa_cfg->u.cfg_table->hdr.flags;
6697
6698 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
6699 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6700
6701 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6702 list_move_tail(&res->queue, &old_res);
6703
3e7ebdfa
WB
6704 if (ioa_cfg->sis64)
6705 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6706 else
6707 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6708
6709 for (i = 0; i < entries; i++) {
6710 if (ioa_cfg->sis64)
6711 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6712 else
6713 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
6714 found = 0;
6715
6716 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 6717 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
6718 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6719 found = 1;
6720 break;
6721 }
6722 }
6723
6724 if (!found) {
6725 if (list_empty(&ioa_cfg->free_res_q)) {
6726 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6727 break;
6728 }
6729
6730 found = 1;
6731 res = list_entry(ioa_cfg->free_res_q.next,
6732 struct ipr_resource_entry, queue);
6733 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 6734 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
6735 res->add_to_ml = 1;
6736 }
6737
6738 if (found)
3e7ebdfa 6739 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
6740 }
6741
6742 list_for_each_entry_safe(res, temp, &old_res, queue) {
6743 if (res->sdev) {
6744 res->del_from_ml = 1;
3e7ebdfa 6745 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 6746 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
6747 }
6748 }
6749
3e7ebdfa
WB
6750 list_for_each_entry_safe(res, temp, &old_res, queue) {
6751 ipr_clear_res_target(res);
6752 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6753 }
6754
ac09c349
BK
6755 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6756 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6757 else
6758 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
6759
6760 LEAVE;
6761 return IPR_RC_JOB_CONTINUE;
6762}
6763
6764/**
6765 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6766 * @ipr_cmd: ipr command struct
6767 *
6768 * This function sends a Query IOA Configuration command
6769 * to the adapter to retrieve the IOA configuration table.
6770 *
6771 * Return value:
6772 * IPR_RC_JOB_RETURN
6773 **/
6774static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6775{
6776 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6777 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 6778 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 6779 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
6780
6781 ENTER;
ac09c349
BK
6782 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6783 ioa_cfg->dual_raid = 1;
1da177e4
LT
6784 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6785 ucode_vpd->major_release, ucode_vpd->card_type,
6786 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6787 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6788 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6789
6790 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
3e7ebdfa
WB
6791 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6792 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 6793
3e7ebdfa 6794 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 6795 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6796
6797 ipr_cmd->job_step = ipr_init_res_table;
6798
6799 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6800
6801 LEAVE;
6802 return IPR_RC_JOB_RETURN;
6803}
6804
6805/**
6806 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6807 * @ipr_cmd: ipr command struct
6808 *
6809 * This utility function sends an inquiry to the adapter.
6810 *
6811 * Return value:
6812 * none
6813 **/
6814static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 6815 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
6816{
6817 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6818
6819 ENTER;
6820 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6821 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6822
6823 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6824 ioarcb->cmd_pkt.cdb[1] = flags;
6825 ioarcb->cmd_pkt.cdb[2] = page;
6826 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6827
a32c055f 6828 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6829
6830 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6831 LEAVE;
6832}
6833
62275040
BK
6834/**
6835 * ipr_inquiry_page_supported - Is the given inquiry page supported
6836 * @page0: inquiry page 0 buffer
6837 * @page: page code.
6838 *
6839 * This function determines if the specified inquiry page is supported.
6840 *
6841 * Return value:
6842 * 1 if page is supported / 0 if not
6843 **/
6844static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6845{
6846 int i;
6847
6848 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6849 if (page0->page[i] == page)
6850 return 1;
6851
6852 return 0;
6853}
6854
ac09c349
BK
6855/**
6856 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6857 * @ipr_cmd: ipr command struct
6858 *
6859 * This function sends a Page 0xD0 inquiry to the adapter
6860 * to retrieve adapter capabilities.
6861 *
6862 * Return value:
6863 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6864 **/
6865static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6866{
6867 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6868 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6869 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6870
6871 ENTER;
6872 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6873 memset(cap, 0, sizeof(*cap));
6874
6875 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6876 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6877 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6878 sizeof(struct ipr_inquiry_cap));
6879 return IPR_RC_JOB_RETURN;
6880 }
6881
6882 LEAVE;
6883 return IPR_RC_JOB_CONTINUE;
6884}
6885
1da177e4
LT
6886/**
6887 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6888 * @ipr_cmd: ipr command struct
6889 *
6890 * This function sends a Page 3 inquiry to the adapter
6891 * to retrieve software VPD information.
6892 *
6893 * Return value:
6894 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6895 **/
6896static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
6897{
6898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
6899
6900 ENTER;
6901
ac09c349 6902 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
6903
6904 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6905 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6906 sizeof(struct ipr_inquiry_page3));
6907
6908 LEAVE;
6909 return IPR_RC_JOB_RETURN;
6910}
6911
6912/**
6913 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6914 * @ipr_cmd: ipr command struct
6915 *
6916 * This function sends a Page 0 inquiry to the adapter
6917 * to retrieve supported inquiry pages.
6918 *
6919 * Return value:
6920 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6921 **/
6922static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6923{
6924 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6925 char type[5];
6926
6927 ENTER;
6928
6929 /* Grab the type out of the VPD and store it away */
6930 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6931 type[4] = '\0';
6932 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6933
62275040 6934 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 6935
62275040
BK
6936 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6937 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6938 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
6939
6940 LEAVE;
6941 return IPR_RC_JOB_RETURN;
6942}
6943
6944/**
6945 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6946 * @ipr_cmd: ipr command struct
6947 *
6948 * This function sends a standard inquiry to the adapter.
6949 *
6950 * Return value:
6951 * IPR_RC_JOB_RETURN
6952 **/
6953static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6954{
6955 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6956
6957 ENTER;
62275040 6958 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
6959
6960 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6961 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6962 sizeof(struct ipr_ioa_vpd));
6963
6964 LEAVE;
6965 return IPR_RC_JOB_RETURN;
6966}
6967
6968/**
214777ba 6969 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
6970 * @ipr_cmd: ipr command struct
6971 *
6972 * This function send an Identify Host Request Response Queue
6973 * command to establish the HRRQ with the adapter.
6974 *
6975 * Return value:
6976 * IPR_RC_JOB_RETURN
6977 **/
214777ba 6978static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6979{
6980 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6981 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6982
6983 ENTER;
6984 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6985
6986 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6987 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6988
6989 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
214777ba
WB
6990 if (ioa_cfg->sis64)
6991 ioarcb->cmd_pkt.cdb[1] = 0x1;
1da177e4 6992 ioarcb->cmd_pkt.cdb[2] =
214777ba 6993 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
1da177e4 6994 ioarcb->cmd_pkt.cdb[3] =
214777ba 6995 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
1da177e4 6996 ioarcb->cmd_pkt.cdb[4] =
214777ba 6997 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
1da177e4 6998 ioarcb->cmd_pkt.cdb[5] =
214777ba 6999 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
1da177e4
LT
7000 ioarcb->cmd_pkt.cdb[7] =
7001 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7002 ioarcb->cmd_pkt.cdb[8] =
7003 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7004
214777ba
WB
7005 if (ioa_cfg->sis64) {
7006 ioarcb->cmd_pkt.cdb[10] =
7007 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7008 ioarcb->cmd_pkt.cdb[11] =
7009 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7010 ioarcb->cmd_pkt.cdb[12] =
7011 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7012 ioarcb->cmd_pkt.cdb[13] =
7013 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7014 }
7015
1da177e4
LT
7016 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7017
7018 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7019
7020 LEAVE;
7021 return IPR_RC_JOB_RETURN;
7022}
7023
7024/**
7025 * ipr_reset_timer_done - Adapter reset timer function
7026 * @ipr_cmd: ipr command struct
7027 *
7028 * Description: This function is used in adapter reset processing
7029 * for timing events. If the reset_cmd pointer in the IOA
7030 * config struct is not this adapter's we are doing nested
7031 * resets and fail_all_ops will take care of freeing the
7032 * command block.
7033 *
7034 * Return value:
7035 * none
7036 **/
7037static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7038{
7039 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7040 unsigned long lock_flags = 0;
7041
7042 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7043
7044 if (ioa_cfg->reset_cmd == ipr_cmd) {
7045 list_del(&ipr_cmd->queue);
7046 ipr_cmd->done(ipr_cmd);
7047 }
7048
7049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7050}
7051
7052/**
7053 * ipr_reset_start_timer - Start a timer for adapter reset job
7054 * @ipr_cmd: ipr command struct
7055 * @timeout: timeout value
7056 *
7057 * Description: This function is used in adapter reset processing
7058 * for timing events. If the reset_cmd pointer in the IOA
7059 * config struct is not this adapter's we are doing nested
7060 * resets and fail_all_ops will take care of freeing the
7061 * command block.
7062 *
7063 * Return value:
7064 * none
7065 **/
7066static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7067 unsigned long timeout)
7068{
7069 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7070 ipr_cmd->done = ipr_reset_ioa_job;
7071
7072 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7073 ipr_cmd->timer.expires = jiffies + timeout;
7074 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7075 add_timer(&ipr_cmd->timer);
7076}
7077
7078/**
7079 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7080 * @ioa_cfg: ioa cfg struct
7081 *
7082 * Return value:
7083 * nothing
7084 **/
7085static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7086{
7087 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7088
7089 /* Initialize Host RRQ pointers */
7090 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7091 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7092 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7093 ioa_cfg->toggle_bit = 1;
7094
7095 /* Zero out config table */
3e7ebdfa 7096 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7097}
7098
214777ba
WB
7099/**
7100 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7101 * @ipr_cmd: ipr command struct
7102 *
7103 * Return value:
7104 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7105 **/
7106static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7107{
7108 unsigned long stage, stage_time;
7109 u32 feedback;
7110 volatile u32 int_reg;
7111 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7112 u64 maskval = 0;
7113
7114 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7115 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7116 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7117
7118 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7119
7120 /* sanity check the stage_time value */
7121 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7122 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7123 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7124 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7125
7126 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7127 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7128 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7129 stage_time = ioa_cfg->transop_timeout;
7130 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7131 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7132 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7133 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7134 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7135 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7136 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7137 return IPR_RC_JOB_CONTINUE;
7138 }
7139
7140 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7141 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7142 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7143 ipr_cmd->done = ipr_reset_ioa_job;
7144 add_timer(&ipr_cmd->timer);
7145 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7146
7147 return IPR_RC_JOB_RETURN;
7148}
7149
1da177e4
LT
7150/**
7151 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7152 * @ipr_cmd: ipr command struct
7153 *
7154 * This function reinitializes some control blocks and
7155 * enables destructive diagnostics on the adapter.
7156 *
7157 * Return value:
7158 * IPR_RC_JOB_RETURN
7159 **/
7160static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7161{
7162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7163 volatile u32 int_reg;
7164
7165 ENTER;
214777ba 7166 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7167 ipr_init_ioa_mem(ioa_cfg);
7168
7169 ioa_cfg->allow_interrupts = 1;
7170 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7171
7172 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7173 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 7174 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
7175 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7176 return IPR_RC_JOB_CONTINUE;
7177 }
7178
7179 /* Enable destructive diagnostics on IOA */
214777ba
WB
7180 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7181
7182 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7183 if (ioa_cfg->sis64)
7184 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
1da177e4 7185
1da177e4
LT
7186 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7187
7188 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7189
214777ba
WB
7190 if (ioa_cfg->sis64) {
7191 ipr_cmd->job_step = ipr_reset_next_stage;
7192 return IPR_RC_JOB_CONTINUE;
7193 }
7194
1da177e4 7195 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 7196 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
7197 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7198 ipr_cmd->done = ipr_reset_ioa_job;
7199 add_timer(&ipr_cmd->timer);
7200 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7201
7202 LEAVE;
7203 return IPR_RC_JOB_RETURN;
7204}
7205
7206/**
7207 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7208 * @ipr_cmd: ipr command struct
7209 *
7210 * This function is invoked when an adapter dump has run out
7211 * of processing time.
7212 *
7213 * Return value:
7214 * IPR_RC_JOB_CONTINUE
7215 **/
7216static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7217{
7218 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7219
7220 if (ioa_cfg->sdt_state == GET_DUMP)
7221 ioa_cfg->sdt_state = ABORT_DUMP;
7222
7223 ipr_cmd->job_step = ipr_reset_alert;
7224
7225 return IPR_RC_JOB_CONTINUE;
7226}
7227
7228/**
7229 * ipr_unit_check_no_data - Log a unit check/no data error log
7230 * @ioa_cfg: ioa config struct
7231 *
7232 * Logs an error indicating the adapter unit checked, but for some
7233 * reason, we were unable to fetch the unit check buffer.
7234 *
7235 * Return value:
7236 * nothing
7237 **/
7238static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7239{
7240 ioa_cfg->errors_logged++;
7241 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7242}
7243
7244/**
7245 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7246 * @ioa_cfg: ioa config struct
7247 *
7248 * Fetches the unit check buffer from the adapter by clocking the data
7249 * through the mailbox register.
7250 *
7251 * Return value:
7252 * nothing
7253 **/
7254static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7255{
7256 unsigned long mailbox;
7257 struct ipr_hostrcb *hostrcb;
7258 struct ipr_uc_sdt sdt;
7259 int rc, length;
65f56475 7260 u32 ioasc;
1da177e4
LT
7261
7262 mailbox = readl(ioa_cfg->ioa_mailbox);
7263
dcbad00e 7264 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
7265 ipr_unit_check_no_data(ioa_cfg);
7266 return;
7267 }
7268
7269 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7270 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7271 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7272
dcbad00e
WB
7273 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7274 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7275 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
7276 ipr_unit_check_no_data(ioa_cfg);
7277 return;
7278 }
7279
7280 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
7281 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7282 length = be32_to_cpu(sdt.entry[0].end_token);
7283 else
7284 length = (be32_to_cpu(sdt.entry[0].end_token) -
7285 be32_to_cpu(sdt.entry[0].start_token)) &
7286 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
7287
7288 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7289 struct ipr_hostrcb, queue);
7290 list_del(&hostrcb->queue);
7291 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7292
7293 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 7294 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
7295 (__be32 *)&hostrcb->hcam,
7296 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7297
65f56475 7298 if (!rc) {
1da177e4 7299 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 7300 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
7301 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7302 ioa_cfg->sdt_state == GET_DUMP)
7303 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7304 } else
1da177e4
LT
7305 ipr_unit_check_no_data(ioa_cfg);
7306
7307 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7308}
7309
7310/**
7311 * ipr_reset_restore_cfg_space - Restore PCI config space.
7312 * @ipr_cmd: ipr command struct
7313 *
7314 * Description: This function restores the saved PCI config space of
7315 * the adapter, fails all outstanding ops back to the callers, and
7316 * fetches the dump/unit check if applicable to this reset.
7317 *
7318 * Return value:
7319 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7320 **/
7321static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7322{
7323 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7324 int rc;
7325
7326 ENTER;
99c965dd 7327 ioa_cfg->pdev->state_saved = true;
1da177e4
LT
7328 rc = pci_restore_state(ioa_cfg->pdev);
7329
7330 if (rc != PCIBIOS_SUCCESSFUL) {
7331 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7332 return IPR_RC_JOB_CONTINUE;
7333 }
7334
7335 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7336 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7337 return IPR_RC_JOB_CONTINUE;
7338 }
7339
7340 ipr_fail_all_ops(ioa_cfg);
7341
7342 if (ioa_cfg->ioa_unit_checked) {
7343 ioa_cfg->ioa_unit_checked = 0;
7344 ipr_get_unit_check_buffer(ioa_cfg);
7345 ipr_cmd->job_step = ipr_reset_alert;
7346 ipr_reset_start_timer(ipr_cmd, 0);
7347 return IPR_RC_JOB_RETURN;
7348 }
7349
7350 if (ioa_cfg->in_ioa_bringdown) {
7351 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7352 } else {
7353 ipr_cmd->job_step = ipr_reset_enable_ioa;
7354
7355 if (GET_DUMP == ioa_cfg->sdt_state) {
7356 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7357 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7358 schedule_work(&ioa_cfg->work_q);
7359 return IPR_RC_JOB_RETURN;
7360 }
7361 }
7362
7363 ENTER;
7364 return IPR_RC_JOB_CONTINUE;
7365}
7366
e619e1a7
BK
7367/**
7368 * ipr_reset_bist_done - BIST has completed on the adapter.
7369 * @ipr_cmd: ipr command struct
7370 *
7371 * Description: Unblock config space and resume the reset process.
7372 *
7373 * Return value:
7374 * IPR_RC_JOB_CONTINUE
7375 **/
7376static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7377{
7378 ENTER;
7379 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7380 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7381 LEAVE;
7382 return IPR_RC_JOB_CONTINUE;
7383}
7384
1da177e4
LT
7385/**
7386 * ipr_reset_start_bist - Run BIST on the adapter.
7387 * @ipr_cmd: ipr command struct
7388 *
7389 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7390 *
7391 * Return value:
7392 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7393 **/
7394static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7395{
7396 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7397 int rc;
7398
7399 ENTER;
b30197d2 7400 pci_block_user_cfg_access(ioa_cfg->pdev);
1da177e4
LT
7401 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7402
7403 if (rc != PCIBIOS_SUCCESSFUL) {
a9aedb09 7404 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
1da177e4
LT
7405 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7406 rc = IPR_RC_JOB_CONTINUE;
7407 } else {
e619e1a7 7408 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
7409 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7410 rc = IPR_RC_JOB_RETURN;
7411 }
7412
7413 LEAVE;
7414 return rc;
7415}
7416
463fc696
BK
7417/**
7418 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7419 * @ipr_cmd: ipr command struct
7420 *
7421 * Description: This clears PCI reset to the adapter and delays two seconds.
7422 *
7423 * Return value:
7424 * IPR_RC_JOB_RETURN
7425 **/
7426static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7427{
7428 ENTER;
7429 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7430 ipr_cmd->job_step = ipr_reset_bist_done;
7431 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7432 LEAVE;
7433 return IPR_RC_JOB_RETURN;
7434}
7435
7436/**
7437 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7438 * @ipr_cmd: ipr command struct
7439 *
7440 * Description: This asserts PCI reset to the adapter.
7441 *
7442 * Return value:
7443 * IPR_RC_JOB_RETURN
7444 **/
7445static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7446{
7447 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7448 struct pci_dev *pdev = ioa_cfg->pdev;
7449
7450 ENTER;
7451 pci_block_user_cfg_access(pdev);
7452 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7453 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7454 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7455 LEAVE;
7456 return IPR_RC_JOB_RETURN;
7457}
7458
1da177e4
LT
7459/**
7460 * ipr_reset_allowed - Query whether or not IOA can be reset
7461 * @ioa_cfg: ioa config struct
7462 *
7463 * Return value:
7464 * 0 if reset not allowed / non-zero if reset is allowed
7465 **/
7466static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7467{
7468 volatile u32 temp_reg;
7469
7470 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7471 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7472}
7473
7474/**
7475 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7476 * @ipr_cmd: ipr command struct
7477 *
7478 * Description: This function waits for adapter permission to run BIST,
7479 * then runs BIST. If the adapter does not give permission after a
7480 * reasonable time, we will reset the adapter anyway. The impact of
7481 * resetting the adapter without warning the adapter is the risk of
7482 * losing the persistent error log on the adapter. If the adapter is
7483 * reset while it is writing to the flash on the adapter, the flash
7484 * segment will have bad ECC and be zeroed.
7485 *
7486 * Return value:
7487 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7488 **/
7489static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7490{
7491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7492 int rc = IPR_RC_JOB_RETURN;
7493
7494 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7495 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7496 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7497 } else {
463fc696 7498 ipr_cmd->job_step = ioa_cfg->reset;
1da177e4
LT
7499 rc = IPR_RC_JOB_CONTINUE;
7500 }
7501
7502 return rc;
7503}
7504
7505/**
7506 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7507 * @ipr_cmd: ipr command struct
7508 *
7509 * Description: This function alerts the adapter that it will be reset.
7510 * If memory space is not currently enabled, proceed directly
7511 * to running BIST on the adapter. The timer must always be started
7512 * so we guarantee we do not run BIST from ipr_isr.
7513 *
7514 * Return value:
7515 * IPR_RC_JOB_RETURN
7516 **/
7517static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7518{
7519 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7520 u16 cmd_reg;
7521 int rc;
7522
7523 ENTER;
7524 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7525
7526 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7527 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 7528 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
7529 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7530 } else {
463fc696 7531 ipr_cmd->job_step = ioa_cfg->reset;
1da177e4
LT
7532 }
7533
7534 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7535 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7536
7537 LEAVE;
7538 return IPR_RC_JOB_RETURN;
7539}
7540
7541/**
7542 * ipr_reset_ucode_download_done - Microcode download completion
7543 * @ipr_cmd: ipr command struct
7544 *
7545 * Description: This function unmaps the microcode download buffer.
7546 *
7547 * Return value:
7548 * IPR_RC_JOB_CONTINUE
7549 **/
7550static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7551{
7552 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7553 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7554
7555 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7556 sglist->num_sg, DMA_TO_DEVICE);
7557
7558 ipr_cmd->job_step = ipr_reset_alert;
7559 return IPR_RC_JOB_CONTINUE;
7560}
7561
7562/**
7563 * ipr_reset_ucode_download - Download microcode to the adapter
7564 * @ipr_cmd: ipr command struct
7565 *
7566 * Description: This function checks to see if it there is microcode
7567 * to download to the adapter. If there is, a download is performed.
7568 *
7569 * Return value:
7570 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7571 **/
7572static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7573{
7574 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7575 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7576
7577 ENTER;
7578 ipr_cmd->job_step = ipr_reset_alert;
7579
7580 if (!sglist)
7581 return IPR_RC_JOB_CONTINUE;
7582
7583 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7584 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7585 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7586 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7587 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7588 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7589 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7590
a32c055f
WB
7591 if (ioa_cfg->sis64)
7592 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7593 else
7594 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
7595 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7596
7597 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7598 IPR_WRITE_BUFFER_TIMEOUT);
7599
7600 LEAVE;
7601 return IPR_RC_JOB_RETURN;
7602}
7603
7604/**
7605 * ipr_reset_shutdown_ioa - Shutdown the adapter
7606 * @ipr_cmd: ipr command struct
7607 *
7608 * Description: This function issues an adapter shutdown of the
7609 * specified type to the specified adapter as part of the
7610 * adapter reset job.
7611 *
7612 * Return value:
7613 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7614 **/
7615static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7616{
7617 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7618 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7619 unsigned long timeout;
7620 int rc = IPR_RC_JOB_CONTINUE;
7621
7622 ENTER;
7623 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7624 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7625 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7626 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7627 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7628
ac09c349
BK
7629 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7630 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
7631 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7632 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
7633 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7634 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 7635 else
ac09c349 7636 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
7637
7638 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7639
7640 rc = IPR_RC_JOB_RETURN;
7641 ipr_cmd->job_step = ipr_reset_ucode_download;
7642 } else
7643 ipr_cmd->job_step = ipr_reset_alert;
7644
7645 LEAVE;
7646 return rc;
7647}
7648
7649/**
7650 * ipr_reset_ioa_job - Adapter reset job
7651 * @ipr_cmd: ipr command struct
7652 *
7653 * Description: This function is the job router for the adapter reset job.
7654 *
7655 * Return value:
7656 * none
7657 **/
7658static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7659{
7660 u32 rc, ioasc;
1da177e4
LT
7661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7662
7663 do {
7664 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7665
7666 if (ioa_cfg->reset_cmd != ipr_cmd) {
7667 /*
7668 * We are doing nested adapter resets and this is
7669 * not the current reset job.
7670 */
7671 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7672 return;
7673 }
7674
7675 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
7676 rc = ipr_cmd->job_step_failed(ipr_cmd);
7677 if (rc == IPR_RC_JOB_RETURN)
7678 return;
1da177e4
LT
7679 }
7680
7681 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 7682 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4
LT
7683 rc = ipr_cmd->job_step(ipr_cmd);
7684 } while(rc == IPR_RC_JOB_CONTINUE);
7685}
7686
7687/**
7688 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7689 * @ioa_cfg: ioa config struct
7690 * @job_step: first job step of reset job
7691 * @shutdown_type: shutdown type
7692 *
7693 * Description: This function will initiate the reset of the given adapter
7694 * starting at the selected job step.
7695 * If the caller needs to wait on the completion of the reset,
7696 * the caller must sleep on the reset_wait_q.
7697 *
7698 * Return value:
7699 * none
7700 **/
7701static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7702 int (*job_step) (struct ipr_cmnd *),
7703 enum ipr_shutdown_type shutdown_type)
7704{
7705 struct ipr_cmnd *ipr_cmd;
7706
7707 ioa_cfg->in_reset_reload = 1;
7708 ioa_cfg->allow_cmds = 0;
7709 scsi_block_requests(ioa_cfg->host);
7710
7711 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7712 ioa_cfg->reset_cmd = ipr_cmd;
7713 ipr_cmd->job_step = job_step;
7714 ipr_cmd->u.shutdown_type = shutdown_type;
7715
7716 ipr_reset_ioa_job(ipr_cmd);
7717}
7718
7719/**
7720 * ipr_initiate_ioa_reset - Initiate an adapter reset
7721 * @ioa_cfg: ioa config struct
7722 * @shutdown_type: shutdown type
7723 *
7724 * Description: This function will initiate the reset of the given adapter.
7725 * If the caller needs to wait on the completion of the reset,
7726 * the caller must sleep on the reset_wait_q.
7727 *
7728 * Return value:
7729 * none
7730 **/
7731static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7732 enum ipr_shutdown_type shutdown_type)
7733{
7734 if (ioa_cfg->ioa_is_dead)
7735 return;
7736
7737 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7738 ioa_cfg->sdt_state = ABORT_DUMP;
7739
7740 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7741 dev_err(&ioa_cfg->pdev->dev,
7742 "IOA taken offline - error recovery failed\n");
7743
7744 ioa_cfg->reset_retries = 0;
7745 ioa_cfg->ioa_is_dead = 1;
7746
7747 if (ioa_cfg->in_ioa_bringdown) {
7748 ioa_cfg->reset_cmd = NULL;
7749 ioa_cfg->in_reset_reload = 0;
7750 ipr_fail_all_ops(ioa_cfg);
7751 wake_up_all(&ioa_cfg->reset_wait_q);
7752
7753 spin_unlock_irq(ioa_cfg->host->host_lock);
7754 scsi_unblock_requests(ioa_cfg->host);
7755 spin_lock_irq(ioa_cfg->host->host_lock);
7756 return;
7757 } else {
7758 ioa_cfg->in_ioa_bringdown = 1;
7759 shutdown_type = IPR_SHUTDOWN_NONE;
7760 }
7761 }
7762
7763 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7764 shutdown_type);
7765}
7766
f8a88b19
LV
7767/**
7768 * ipr_reset_freeze - Hold off all I/O activity
7769 * @ipr_cmd: ipr command struct
7770 *
7771 * Description: If the PCI slot is frozen, hold off all I/O
7772 * activity; then, as soon as the slot is available again,
7773 * initiate an adapter reset.
7774 */
7775static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7776{
7777 /* Disallow new interrupts, avoid loop */
7778 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7779 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7780 ipr_cmd->done = ipr_reset_ioa_job;
7781 return IPR_RC_JOB_RETURN;
7782}
7783
7784/**
7785 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7786 * @pdev: PCI device struct
7787 *
7788 * Description: This routine is called to tell us that the PCI bus
7789 * is down. Can't do anything here, except put the device driver
7790 * into a holding pattern, waiting for the PCI bus to come back.
7791 */
7792static void ipr_pci_frozen(struct pci_dev *pdev)
7793{
7794 unsigned long flags = 0;
7795 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7796
7797 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7798 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7800}
7801
7802/**
7803 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7804 * @pdev: PCI device struct
7805 *
7806 * Description: This routine is called by the pci error recovery
7807 * code after the PCI slot has been reset, just before we
7808 * should resume normal operations.
7809 */
7810static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7811{
7812 unsigned long flags = 0;
7813 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7814
7815 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
463fc696
BK
7816 if (ioa_cfg->needs_warm_reset)
7817 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7818 else
7819 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7820 IPR_SHUTDOWN_NONE);
f8a88b19
LV
7821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7822 return PCI_ERS_RESULT_RECOVERED;
7823}
7824
7825/**
7826 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7827 * @pdev: PCI device struct
7828 *
7829 * Description: This routine is called when the PCI bus has
7830 * permanently failed.
7831 */
7832static void ipr_pci_perm_failure(struct pci_dev *pdev)
7833{
7834 unsigned long flags = 0;
7835 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7836
7837 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7838 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7839 ioa_cfg->sdt_state = ABORT_DUMP;
7840 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7841 ioa_cfg->in_ioa_bringdown = 1;
6ff63896 7842 ioa_cfg->allow_cmds = 0;
f8a88b19
LV
7843 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7845}
7846
7847/**
7848 * ipr_pci_error_detected - Called when a PCI error is detected.
7849 * @pdev: PCI device struct
7850 * @state: PCI channel state
7851 *
7852 * Description: Called when a PCI error is detected.
7853 *
7854 * Return value:
7855 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7856 */
7857static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7858 pci_channel_state_t state)
7859{
7860 switch (state) {
7861 case pci_channel_io_frozen:
7862 ipr_pci_frozen(pdev);
7863 return PCI_ERS_RESULT_NEED_RESET;
7864 case pci_channel_io_perm_failure:
7865 ipr_pci_perm_failure(pdev);
7866 return PCI_ERS_RESULT_DISCONNECT;
7867 break;
7868 default:
7869 break;
7870 }
7871 return PCI_ERS_RESULT_NEED_RESET;
7872}
7873
1da177e4
LT
7874/**
7875 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7876 * @ioa_cfg: ioa cfg struct
7877 *
7878 * Description: This is the second phase of adapter intialization
7879 * This function takes care of initilizing the adapter to the point
7880 * where it can accept new commands.
7881
7882 * Return value:
b1c11812 7883 * 0 on success / -EIO on failure
1da177e4
LT
7884 **/
7885static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7886{
7887 int rc = 0;
7888 unsigned long host_lock_flags = 0;
7889
7890 ENTER;
7891 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7892 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce
BK
7893 if (ioa_cfg->needs_hard_reset) {
7894 ioa_cfg->needs_hard_reset = 0;
7895 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7896 } else
7897 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7898 IPR_SHUTDOWN_NONE);
1da177e4
LT
7899
7900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7901 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7902 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7903
7904 if (ioa_cfg->ioa_is_dead) {
7905 rc = -EIO;
7906 } else if (ipr_invalid_adapter(ioa_cfg)) {
7907 if (!ipr_testmode)
7908 rc = -EIO;
7909
7910 dev_err(&ioa_cfg->pdev->dev,
7911 "Adapter not supported in this hardware configuration.\n");
7912 }
7913
7914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7915
7916 LEAVE;
7917 return rc;
7918}
7919
7920/**
7921 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7922 * @ioa_cfg: ioa config struct
7923 *
7924 * Return value:
7925 * none
7926 **/
7927static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7928{
7929 int i;
7930
7931 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7932 if (ioa_cfg->ipr_cmnd_list[i])
7933 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7934 ioa_cfg->ipr_cmnd_list[i],
7935 ioa_cfg->ipr_cmnd_list_dma[i]);
7936
7937 ioa_cfg->ipr_cmnd_list[i] = NULL;
7938 }
7939
7940 if (ioa_cfg->ipr_cmd_pool)
7941 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7942
7943 ioa_cfg->ipr_cmd_pool = NULL;
7944}
7945
7946/**
7947 * ipr_free_mem - Frees memory allocated for an adapter
7948 * @ioa_cfg: ioa cfg struct
7949 *
7950 * Return value:
7951 * nothing
7952 **/
7953static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7954{
7955 int i;
7956
7957 kfree(ioa_cfg->res_entries);
7958 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7959 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7960 ipr_free_cmd_blks(ioa_cfg);
7961 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7962 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
3e7ebdfa
WB
7963 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7964 ioa_cfg->u.cfg_table,
1da177e4
LT
7965 ioa_cfg->cfg_table_dma);
7966
7967 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7968 pci_free_consistent(ioa_cfg->pdev,
7969 sizeof(struct ipr_hostrcb),
7970 ioa_cfg->hostrcb[i],
7971 ioa_cfg->hostrcb_dma[i]);
7972 }
7973
7974 ipr_free_dump(ioa_cfg);
1da177e4
LT
7975 kfree(ioa_cfg->trace);
7976}
7977
7978/**
7979 * ipr_free_all_resources - Free all allocated resources for an adapter.
7980 * @ipr_cmd: ipr command struct
7981 *
7982 * This function frees all allocated resources for the
7983 * specified adapter.
7984 *
7985 * Return value:
7986 * none
7987 **/
7988static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7989{
7990 struct pci_dev *pdev = ioa_cfg->pdev;
7991
7992 ENTER;
7993 free_irq(pdev->irq, ioa_cfg);
5a9ef25b 7994 pci_disable_msi(pdev);
1da177e4
LT
7995 iounmap(ioa_cfg->hdw_dma_regs);
7996 pci_release_regions(pdev);
7997 ipr_free_mem(ioa_cfg);
7998 scsi_host_put(ioa_cfg->host);
7999 pci_disable_device(pdev);
8000 LEAVE;
8001}
8002
8003/**
8004 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8005 * @ioa_cfg: ioa config struct
8006 *
8007 * Return value:
8008 * 0 on success / -ENOMEM on allocation failure
8009 **/
8010static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8011{
8012 struct ipr_cmnd *ipr_cmd;
8013 struct ipr_ioarcb *ioarcb;
8014 dma_addr_t dma_addr;
8015 int i;
8016
8017 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
a32c055f 8018 sizeof(struct ipr_cmnd), 16, 0);
1da177e4
LT
8019
8020 if (!ioa_cfg->ipr_cmd_pool)
8021 return -ENOMEM;
8022
8023 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
e94b1766 8024 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
8025
8026 if (!ipr_cmd) {
8027 ipr_free_cmd_blks(ioa_cfg);
8028 return -ENOMEM;
8029 }
8030
8031 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8032 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8033 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8034
8035 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
8036 ipr_cmd->dma_addr = dma_addr;
8037 if (ioa_cfg->sis64)
8038 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8039 else
8040 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8041
1da177e4 8042 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
8043 if (ioa_cfg->sis64) {
8044 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8045 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8046 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8047 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8048 } else {
8049 ioarcb->write_ioadl_addr =
8050 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8051 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8052 ioarcb->ioasa_host_pci_addr =
8053 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8054 }
1da177e4
LT
8055 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8056 ipr_cmd->cmd_index = i;
8057 ipr_cmd->ioa_cfg = ioa_cfg;
8058 ipr_cmd->sense_buffer_dma = dma_addr +
8059 offsetof(struct ipr_cmnd, sense_buffer);
8060
8061 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8062 }
8063
8064 return 0;
8065}
8066
8067/**
8068 * ipr_alloc_mem - Allocate memory for an adapter
8069 * @ioa_cfg: ioa config struct
8070 *
8071 * Return value:
8072 * 0 on success / non-zero for error
8073 **/
8074static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8075{
8076 struct pci_dev *pdev = ioa_cfg->pdev;
8077 int i, rc = -ENOMEM;
8078
8079 ENTER;
0bc42e35 8080 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 8081 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
8082
8083 if (!ioa_cfg->res_entries)
8084 goto out;
8085
3e7ebdfa
WB
8086 if (ioa_cfg->sis64) {
8087 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8088 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8089 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8090 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8091 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8092 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8093 }
8094
8095 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 8096 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
8097 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8098 }
1da177e4
LT
8099
8100 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8101 sizeof(struct ipr_misc_cbs),
8102 &ioa_cfg->vpd_cbs_dma);
8103
8104 if (!ioa_cfg->vpd_cbs)
8105 goto out_free_res_entries;
8106
8107 if (ipr_alloc_cmd_blks(ioa_cfg))
8108 goto out_free_vpd_cbs;
8109
8110 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8111 sizeof(u32) * IPR_NUM_CMD_BLKS,
8112 &ioa_cfg->host_rrq_dma);
8113
8114 if (!ioa_cfg->host_rrq)
8115 goto out_ipr_free_cmd_blocks;
8116
3e7ebdfa
WB
8117 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8118 ioa_cfg->cfg_table_size,
8119 &ioa_cfg->cfg_table_dma);
1da177e4 8120
3e7ebdfa 8121 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
8122 goto out_free_host_rrq;
8123
8124 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8125 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8126 sizeof(struct ipr_hostrcb),
8127 &ioa_cfg->hostrcb_dma[i]);
8128
8129 if (!ioa_cfg->hostrcb[i])
8130 goto out_free_hostrcb_dma;
8131
8132 ioa_cfg->hostrcb[i]->hostrcb_dma =
8133 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 8134 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
8135 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8136 }
8137
0bc42e35 8138 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
8139 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8140
8141 if (!ioa_cfg->trace)
8142 goto out_free_hostrcb_dma;
8143
1da177e4
LT
8144 rc = 0;
8145out:
8146 LEAVE;
8147 return rc;
8148
8149out_free_hostrcb_dma:
8150 while (i-- > 0) {
8151 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8152 ioa_cfg->hostrcb[i],
8153 ioa_cfg->hostrcb_dma[i]);
8154 }
3e7ebdfa
WB
8155 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8156 ioa_cfg->u.cfg_table,
8157 ioa_cfg->cfg_table_dma);
1da177e4
LT
8158out_free_host_rrq:
8159 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8160 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8161out_ipr_free_cmd_blocks:
8162 ipr_free_cmd_blks(ioa_cfg);
8163out_free_vpd_cbs:
8164 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8165 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8166out_free_res_entries:
8167 kfree(ioa_cfg->res_entries);
8168 goto out;
8169}
8170
8171/**
8172 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8173 * @ioa_cfg: ioa config struct
8174 *
8175 * Return value:
8176 * none
8177 **/
8178static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8179{
8180 int i;
8181
8182 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8183 ioa_cfg->bus_attr[i].bus = i;
8184 ioa_cfg->bus_attr[i].qas_enabled = 0;
8185 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8186 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8187 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8188 else
8189 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8190 }
8191}
8192
8193/**
8194 * ipr_init_ioa_cfg - Initialize IOA config struct
8195 * @ioa_cfg: ioa config struct
8196 * @host: scsi host struct
8197 * @pdev: PCI dev struct
8198 *
8199 * Return value:
8200 * none
8201 **/
8202static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8203 struct Scsi_Host *host, struct pci_dev *pdev)
8204{
8205 const struct ipr_interrupt_offsets *p;
8206 struct ipr_interrupts *t;
8207 void __iomem *base;
8208
8209 ioa_cfg->host = host;
8210 ioa_cfg->pdev = pdev;
8211 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 8212 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
8213 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8214 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8215 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8216 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8217 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8218 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8219 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8220 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8221
8222 INIT_LIST_HEAD(&ioa_cfg->free_q);
8223 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8224 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8225 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8226 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8227 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 8228 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 8229 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 8230 init_waitqueue_head(&ioa_cfg->msi_wait_q);
1da177e4
LT
8231 ioa_cfg->sdt_state = INACTIVE;
8232
8233 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 8234 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 8235
3e7ebdfa
WB
8236 if (ioa_cfg->sis64) {
8237 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8238 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8239 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8240 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8241 } else {
8242 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8243 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8244 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8245 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8246 }
1da177e4
LT
8247 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8248 host->unique_id = host->host_no;
8249 host->max_cmd_len = IPR_MAX_CDB_LEN;
8250 pci_set_drvdata(pdev, ioa_cfg);
8251
8252 p = &ioa_cfg->chip_cfg->regs;
8253 t = &ioa_cfg->regs;
8254 base = ioa_cfg->hdw_dma_regs;
8255
8256 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8257 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
214777ba 8258 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
1da177e4 8259 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
214777ba 8260 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
1da177e4 8261 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
214777ba 8262 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
1da177e4 8263 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
214777ba 8264 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
1da177e4
LT
8265 t->ioarrin_reg = base + p->ioarrin_reg;
8266 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
214777ba 8267 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
1da177e4 8268 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
214777ba 8269 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
1da177e4 8270 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
214777ba 8271 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
dcbad00e
WB
8272
8273 if (ioa_cfg->sis64) {
214777ba 8274 t->init_feedback_reg = base + p->init_feedback_reg;
dcbad00e
WB
8275 t->dump_addr_reg = base + p->dump_addr_reg;
8276 t->dump_data_reg = base + p->dump_data_reg;
8277 }
1da177e4
LT
8278}
8279
8280/**
1be7bd82 8281 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
8282 * @dev_id: PCI device id struct
8283 *
8284 * Return value:
1be7bd82 8285 * ptr to chip information on success / NULL on failure
1da177e4 8286 **/
1be7bd82
WB
8287static const struct ipr_chip_t * __devinit
8288ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
8289{
8290 int i;
8291
1da177e4
LT
8292 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8293 if (ipr_chip[i].vendor == dev_id->vendor &&
8294 ipr_chip[i].device == dev_id->device)
1be7bd82 8295 return &ipr_chip[i];
1da177e4
LT
8296 return NULL;
8297}
8298
95fecd90
WB
8299/**
8300 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8301 * @pdev: PCI device struct
8302 *
8303 * Description: Simply set the msi_received flag to 1 indicating that
8304 * Message Signaled Interrupts are supported.
8305 *
8306 * Return value:
8307 * 0 on success / non-zero on failure
8308 **/
8309static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8310{
8311 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8312 unsigned long lock_flags = 0;
8313 irqreturn_t rc = IRQ_HANDLED;
8314
8315 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8316
8317 ioa_cfg->msi_received = 1;
8318 wake_up(&ioa_cfg->msi_wait_q);
8319
8320 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8321 return rc;
8322}
8323
8324/**
8325 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8326 * @pdev: PCI device struct
8327 *
8328 * Description: The return value from pci_enable_msi() can not always be
8329 * trusted. This routine sets up and initiates a test interrupt to determine
8330 * if the interrupt is received via the ipr_test_intr() service routine.
8331 * If the tests fails, the driver will fall back to LSI.
8332 *
8333 * Return value:
8334 * 0 on success / non-zero on failure
8335 **/
8336static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8337 struct pci_dev *pdev)
8338{
8339 int rc;
8340 volatile u32 int_reg;
8341 unsigned long lock_flags = 0;
8342
8343 ENTER;
8344
8345 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8346 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8347 ioa_cfg->msi_received = 0;
8348 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 8349 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
8350 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8352
8353 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8354 if (rc) {
8355 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8356 return rc;
8357 } else if (ipr_debug)
8358 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8359
214777ba 8360 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
8361 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8362 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8363 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8364
8365 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8366 if (!ioa_cfg->msi_received) {
8367 /* MSI test failed */
8368 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8369 rc = -EOPNOTSUPP;
8370 } else if (ipr_debug)
8371 dev_info(&pdev->dev, "MSI test succeeded.\n");
8372
8373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8374
8375 free_irq(pdev->irq, ioa_cfg);
8376
8377 LEAVE;
8378
8379 return rc;
8380}
8381
1da177e4
LT
8382/**
8383 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8384 * @pdev: PCI device struct
8385 * @dev_id: PCI device id struct
8386 *
8387 * Return value:
8388 * 0 on success / non-zero on failure
8389 **/
8390static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8391 const struct pci_device_id *dev_id)
8392{
8393 struct ipr_ioa_cfg *ioa_cfg;
8394 struct Scsi_Host *host;
8395 unsigned long ipr_regs_pci;
8396 void __iomem *ipr_regs;
a2a65a3e 8397 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 8398 volatile u32 mask, uproc, interrupts;
1da177e4
LT
8399
8400 ENTER;
8401
8402 if ((rc = pci_enable_device(pdev))) {
8403 dev_err(&pdev->dev, "Cannot enable adapter\n");
8404 goto out;
8405 }
8406
8407 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8408
8409 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8410
8411 if (!host) {
8412 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8413 rc = -ENOMEM;
8414 goto out_disable;
8415 }
8416
8417 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8418 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
35a39691
BK
8419 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8420 sata_port_info.flags, &ipr_sata_ops);
1da177e4 8421
1be7bd82 8422 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 8423
1be7bd82 8424 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
8425 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8426 dev_id->vendor, dev_id->device);
8427 goto out_scsi_host_put;
8428 }
8429
a32c055f
WB
8430 /* set SIS 32 or SIS 64 */
8431 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82
WB
8432 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8433
5469cb5b
BK
8434 if (ipr_transop_timeout)
8435 ioa_cfg->transop_timeout = ipr_transop_timeout;
8436 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8437 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8438 else
8439 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8440
44c10138 8441 ioa_cfg->revid = pdev->revision;
463fc696 8442
1da177e4
LT
8443 ipr_regs_pci = pci_resource_start(pdev, 0);
8444
8445 rc = pci_request_regions(pdev, IPR_NAME);
8446 if (rc < 0) {
8447 dev_err(&pdev->dev,
8448 "Couldn't register memory range of registers\n");
8449 goto out_scsi_host_put;
8450 }
8451
25729a7f 8452 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
8453
8454 if (!ipr_regs) {
8455 dev_err(&pdev->dev,
8456 "Couldn't map memory range of registers\n");
8457 rc = -ENOMEM;
8458 goto out_release_regions;
8459 }
8460
8461 ioa_cfg->hdw_dma_regs = ipr_regs;
8462 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8463 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8464
8465 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8466
8467 pci_set_master(pdev);
8468
a32c055f
WB
8469 if (ioa_cfg->sis64) {
8470 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8471 if (rc < 0) {
8472 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8473 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8474 }
8475
8476 } else
8477 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8478
1da177e4
LT
8479 if (rc < 0) {
8480 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8481 goto cleanup_nomem;
8482 }
8483
8484 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8485 ioa_cfg->chip_cfg->cache_line_size);
8486
8487 if (rc != PCIBIOS_SUCCESSFUL) {
8488 dev_err(&pdev->dev, "Write of cache line size failed\n");
8489 rc = -EIO;
8490 goto cleanup_nomem;
8491 }
8492
95fecd90 8493 /* Enable MSI style interrupts if they are supported. */
1be7bd82 8494 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
95fecd90
WB
8495 rc = ipr_test_msi(ioa_cfg, pdev);
8496 if (rc == -EOPNOTSUPP)
8497 pci_disable_msi(pdev);
8498 else if (rc)
8499 goto out_msi_disable;
8500 else
8501 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8502 } else if (ipr_debug)
8503 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8504
1da177e4
LT
8505 /* Save away PCI config space for use following IOA reset */
8506 rc = pci_save_state(pdev);
8507
8508 if (rc != PCIBIOS_SUCCESSFUL) {
8509 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8510 rc = -EIO;
8511 goto cleanup_nomem;
8512 }
8513
8514 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8515 goto cleanup_nomem;
8516
8517 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8518 goto cleanup_nomem;
8519
3e7ebdfa
WB
8520 if (ioa_cfg->sis64)
8521 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8522 + ((sizeof(struct ipr_config_table_entry64)
8523 * ioa_cfg->max_devs_supported)));
8524 else
8525 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8526 + ((sizeof(struct ipr_config_table_entry)
8527 * ioa_cfg->max_devs_supported)));
8528
1da177e4
LT
8529 rc = ipr_alloc_mem(ioa_cfg);
8530 if (rc < 0) {
8531 dev_err(&pdev->dev,
8532 "Couldn't allocate enough memory for device driver!\n");
8533 goto cleanup_nomem;
8534 }
8535
ce155cce
BK
8536 /*
8537 * If HRRQ updated interrupt is not masked, or reset alert is set,
8538 * the card is in an unknown state and needs a hard reset
8539 */
214777ba
WB
8540 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8541 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8542 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce
BK
8543 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8544 ioa_cfg->needs_hard_reset = 1;
473b1e8e
BK
8545 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8546 ioa_cfg->needs_hard_reset = 1;
8547 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8548 ioa_cfg->ioa_unit_checked = 1;
ce155cce 8549
1da177e4 8550 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
95fecd90
WB
8551 rc = request_irq(pdev->irq, ipr_isr,
8552 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8553 IPR_NAME, ioa_cfg);
1da177e4
LT
8554
8555 if (rc) {
8556 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8557 pdev->irq, rc);
8558 goto cleanup_nolog;
8559 }
8560
463fc696
BK
8561 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8562 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8563 ioa_cfg->needs_warm_reset = 1;
8564 ioa_cfg->reset = ipr_reset_slot_reset;
8565 } else
8566 ioa_cfg->reset = ipr_reset_start_bist;
8567
1da177e4
LT
8568 spin_lock(&ipr_driver_lock);
8569 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8570 spin_unlock(&ipr_driver_lock);
8571
8572 LEAVE;
8573out:
8574 return rc;
8575
8576cleanup_nolog:
8577 ipr_free_mem(ioa_cfg);
8578cleanup_nomem:
8579 iounmap(ipr_regs);
95fecd90
WB
8580out_msi_disable:
8581 pci_disable_msi(pdev);
1da177e4
LT
8582out_release_regions:
8583 pci_release_regions(pdev);
8584out_scsi_host_put:
8585 scsi_host_put(host);
8586out_disable:
8587 pci_disable_device(pdev);
8588 goto out;
8589}
8590
8591/**
8592 * ipr_scan_vsets - Scans for VSET devices
8593 * @ioa_cfg: ioa config struct
8594 *
8595 * Description: Since the VSET resources do not follow SAM in that we can have
8596 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8597 *
8598 * Return value:
8599 * none
8600 **/
8601static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8602{
8603 int target, lun;
8604
8605 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8606 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8607 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8608}
8609
8610/**
8611 * ipr_initiate_ioa_bringdown - Bring down an adapter
8612 * @ioa_cfg: ioa config struct
8613 * @shutdown_type: shutdown type
8614 *
8615 * Description: This function will initiate bringing down the adapter.
8616 * This consists of issuing an IOA shutdown to the adapter
8617 * to flush the cache, and running BIST.
8618 * If the caller needs to wait on the completion of the reset,
8619 * the caller must sleep on the reset_wait_q.
8620 *
8621 * Return value:
8622 * none
8623 **/
8624static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8625 enum ipr_shutdown_type shutdown_type)
8626{
8627 ENTER;
8628 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8629 ioa_cfg->sdt_state = ABORT_DUMP;
8630 ioa_cfg->reset_retries = 0;
8631 ioa_cfg->in_ioa_bringdown = 1;
8632 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8633 LEAVE;
8634}
8635
8636/**
8637 * __ipr_remove - Remove a single adapter
8638 * @pdev: pci device struct
8639 *
8640 * Adapter hot plug remove entry point.
8641 *
8642 * Return value:
8643 * none
8644 **/
8645static void __ipr_remove(struct pci_dev *pdev)
8646{
8647 unsigned long host_lock_flags = 0;
8648 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8649 ENTER;
8650
8651 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
970ea294
BK
8652 while(ioa_cfg->in_reset_reload) {
8653 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8654 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8655 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8656 }
8657
1da177e4
LT
8658 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8659
8660 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8661 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 8662 flush_scheduled_work();
1da177e4
LT
8663 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8664
8665 spin_lock(&ipr_driver_lock);
8666 list_del(&ioa_cfg->queue);
8667 spin_unlock(&ipr_driver_lock);
8668
8669 if (ioa_cfg->sdt_state == ABORT_DUMP)
8670 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8671 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8672
8673 ipr_free_all_resources(ioa_cfg);
8674
8675 LEAVE;
8676}
8677
8678/**
8679 * ipr_remove - IOA hot plug remove entry point
8680 * @pdev: pci device struct
8681 *
8682 * Adapter hot plug remove entry point.
8683 *
8684 * Return value:
8685 * none
8686 **/
f381642d 8687static void __devexit ipr_remove(struct pci_dev *pdev)
1da177e4
LT
8688{
8689 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8690
8691 ENTER;
8692
ee959b00 8693 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 8694 &ipr_trace_attr);
ee959b00 8695 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8696 &ipr_dump_attr);
8697 scsi_remove_host(ioa_cfg->host);
8698
8699 __ipr_remove(pdev);
8700
8701 LEAVE;
8702}
8703
8704/**
8705 * ipr_probe - Adapter hot plug add entry point
8706 *
8707 * Return value:
8708 * 0 on success / non-zero on failure
8709 **/
8710static int __devinit ipr_probe(struct pci_dev *pdev,
8711 const struct pci_device_id *dev_id)
8712{
8713 struct ipr_ioa_cfg *ioa_cfg;
8714 int rc;
8715
8716 rc = ipr_probe_ioa(pdev, dev_id);
8717
8718 if (rc)
8719 return rc;
8720
8721 ioa_cfg = pci_get_drvdata(pdev);
8722 rc = ipr_probe_ioa_part2(ioa_cfg);
8723
8724 if (rc) {
8725 __ipr_remove(pdev);
8726 return rc;
8727 }
8728
8729 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8730
8731 if (rc) {
8732 __ipr_remove(pdev);
8733 return rc;
8734 }
8735
ee959b00 8736 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8737 &ipr_trace_attr);
8738
8739 if (rc) {
8740 scsi_remove_host(ioa_cfg->host);
8741 __ipr_remove(pdev);
8742 return rc;
8743 }
8744
ee959b00 8745 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8746 &ipr_dump_attr);
8747
8748 if (rc) {
ee959b00 8749 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8750 &ipr_trace_attr);
8751 scsi_remove_host(ioa_cfg->host);
8752 __ipr_remove(pdev);
8753 return rc;
8754 }
8755
8756 scsi_scan_host(ioa_cfg->host);
8757 ipr_scan_vsets(ioa_cfg);
8758 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8759 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 8760 ioa_cfg->host->max_channel = IPR_VSET_BUS;
1da177e4
LT
8761 schedule_work(&ioa_cfg->work_q);
8762 return 0;
8763}
8764
8765/**
8766 * ipr_shutdown - Shutdown handler.
d18c3db5 8767 * @pdev: pci device struct
1da177e4
LT
8768 *
8769 * This function is invoked upon system shutdown/reboot. It will issue
8770 * an adapter shutdown to the adapter to flush the write cache.
8771 *
8772 * Return value:
8773 * none
8774 **/
d18c3db5 8775static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 8776{
d18c3db5 8777 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4
LT
8778 unsigned long lock_flags = 0;
8779
8780 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
8781 while(ioa_cfg->in_reset_reload) {
8782 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8783 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8784 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8785 }
8786
1da177e4
LT
8787 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8788 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8789 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8790}
8791
8792static struct pci_device_id ipr_pci_table[] __devinitdata = {
8793 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8794 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 8795 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8796 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 8797 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8798 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 8799 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8800 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 8801 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 8802 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 8803 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 8804 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 8805 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 8806 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 8807 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
8808 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8809 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 8810 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 8811 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 8812 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
8813 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8814 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 8815 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
8816 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8817 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 8818 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 8819 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 8820 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
8821 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8822 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 8823 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
8824 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8825 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 8826 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
8827 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8828 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
8829 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8830 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
60e7486b 8831 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 8832 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 8833 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 8834 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 8835 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 8836 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 8837 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 8838 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
8839 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8840 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 8841 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
8842 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8843 IPR_USE_LONG_TRANSOP_TIMEOUT },
d7b4627f
WB
8844 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8845 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
8846 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8847 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8848 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8849 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8850 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8851 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8852 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8853 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8854 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8855 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8856 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8857 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8858 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8859 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
1da177e4
LT
8860 { }
8861};
8862MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8863
f8a88b19
LV
8864static struct pci_error_handlers ipr_err_handler = {
8865 .error_detected = ipr_pci_error_detected,
8866 .slot_reset = ipr_pci_slot_reset,
8867};
8868
1da177e4
LT
8869static struct pci_driver ipr_driver = {
8870 .name = IPR_NAME,
8871 .id_table = ipr_pci_table,
8872 .probe = ipr_probe,
f381642d 8873 .remove = __devexit_p(ipr_remove),
d18c3db5 8874 .shutdown = ipr_shutdown,
f8a88b19 8875 .err_handler = &ipr_err_handler,
1da177e4
LT
8876};
8877
f72919ec
WB
8878/**
8879 * ipr_halt_done - Shutdown prepare completion
8880 *
8881 * Return value:
8882 * none
8883 **/
8884static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8885{
8886 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8887
8888 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8889}
8890
8891/**
8892 * ipr_halt - Issue shutdown prepare to all adapters
8893 *
8894 * Return value:
8895 * NOTIFY_OK on success / NOTIFY_DONE on failure
8896 **/
8897static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8898{
8899 struct ipr_cmnd *ipr_cmd;
8900 struct ipr_ioa_cfg *ioa_cfg;
8901 unsigned long flags = 0;
8902
8903 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8904 return NOTIFY_DONE;
8905
8906 spin_lock(&ipr_driver_lock);
8907
8908 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8909 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8910 if (!ioa_cfg->allow_cmds) {
8911 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8912 continue;
8913 }
8914
8915 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8916 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8917 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8918 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8919 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8920
8921 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8923 }
8924 spin_unlock(&ipr_driver_lock);
8925
8926 return NOTIFY_OK;
8927}
8928
8929static struct notifier_block ipr_notifier = {
8930 ipr_halt, NULL, 0
8931};
8932
1da177e4
LT
8933/**
8934 * ipr_init - Module entry point
8935 *
8936 * Return value:
8937 * 0 on success / negative value on failure
8938 **/
8939static int __init ipr_init(void)
8940{
8941 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8942 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8943
f72919ec 8944 register_reboot_notifier(&ipr_notifier);
dcbccbde 8945 return pci_register_driver(&ipr_driver);
1da177e4
LT
8946}
8947
8948/**
8949 * ipr_exit - Module unload
8950 *
8951 * Module unload entry point.
8952 *
8953 * Return value:
8954 * none
8955 **/
8956static void __exit ipr_exit(void)
8957{
f72919ec 8958 unregister_reboot_notifier(&ipr_notifier);
1da177e4
LT
8959 pci_unregister_driver(&ipr_driver);
8960}
8961
8962module_init(ipr_init);
8963module_exit(ipr_exit);