/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	},
};

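/*
 * Each supported PCI vendor/device ID below is bound to an interrupt
 * mode (legacy LSI or MSI), a SIS interface level (32 or 64 bit), and
 * one of the three register maps defined in ipr_chip_cfg[] above.
 */
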
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

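/*
 * Enclosure (SES) product ID table. Each entry pairs a product ID
 * string with a per-byte compare mask used when matching inquiry data,
 * plus the maximum SCSI bus speed (in MB/s) the driver will allow for
 * that enclosure.
 */
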
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

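/*
 * Command tracing: when CONFIG_SCSI_IPR_TRACE is set, every command
 * start/finish is recorded in the ioa_cfg->trace buffer; when it is
 * not set, ipr_trc_hook() compiles away to a no-op.
 */
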
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd: ipr command struct
 * @type: trace type
 * @add_data: additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

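/*
 * Note: SIS64 adapters expose 64-bit interrupt mask registers (hence
 * the writeq below), while older chips use 32-bit registers. The
 * trailing read of the sense interrupt register serves to flush the
 * posted MMIO writes to the adapter before the function returns.
 */
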
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg: ioa config struct
 * @clr_ints: interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg: ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

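/*
 * On SIS64 adapters the low-order bits of the IOARCB address written
 * to the IOARRIN register encode the IOARCB size: bit 0 selects the
 * default 256 byte format, and bit 2 selects a 512 byte format once
 * the scatter list exceeds 128 bytes of IOADL64 descriptors.
 */
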
/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

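/*
 * The mb() in ipr_do_req() orders the memory updates to the IOARCB
 * ahead of the MMIO doorbell write issued by ipr_send_command(), so
 * the adapter never fetches a half-built request.
 */
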
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 * @done: done function
 * @timeout_func: timeout function
 * @timeout: timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd: ipr command struct
 * @dma_addr: dma address
 * @len: transfer length
 * @flags: ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

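/*
 * ipr_send_blocking_cmd() must be entered with the host lock held; it
 * drops the lock around wait_for_completion() and re-acquires it
 * before returning.
 */
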
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd: ipr command struct
 * @timeout_func: function to invoke if command times out
 * @timeout: timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg: ioa config struct
 * @type: HCAM type
 * @hostrcb: hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res: resource entry struct
 * @proto: cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

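/*
 * On SIS64 adapters, targets are not addressed by physical bus ids;
 * instead, target numbers are handed out from the target_ids,
 * array_ids, and vset_ids bitmaps (one id space per virtual bus) via
 * find_first_zero_bit()/set_bit() below.
 */
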
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
				sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

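/*
 * Example (hypothetical values): a resource path of
 * { 0x00, 0x01, 0x02, 0xff, ... } is formatted as "00-01-02";
 * a 0xff byte terminates the path.
 */
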
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path: resource path
 * @buffer: buffer
 * @len: length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res: resource entry struct
 * @cfgtew: config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res: resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

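/*
 * Note: if the free resource queue is empty when a new-device
 * notification arrives, the HCAM below is simply re-issued and the
 * configuration change is not recorded.
 */
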
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

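/*
 * Example (illustrative): given a buffer holding "IBM     ",
 * strip_and_pad_whitespace() leaves "IBM " followed by a NUL and
 * returns the offset of the NUL, which is where the next field is
 * appended by the compact VPD loggers below.
 */
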
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i: index into buffer
 * @buf: string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix: string to print at start of printk
 * @hostrcb: hostrcb pointer
 * @vpd: vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd: vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix: string to print at start of printk
 * @hostrcb: hostrcb pointer
 * @vpd: vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd: vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a sis64 configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			ipr_format_res_path(dev_entry->res_path, buffer,
					    sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 *	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

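/*
 * Illustrative note (not driver code): ipr_log_hex_data() above prints
 * four big-endian words per line, prefixed by the starting byte offset
 * (i counts words, so the offset column is i*4). A hypothetical 32-byte
 * buffer would log as two lines shaped like:
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 *
 * The data values here are made up purely for illustration.
 */
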
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(fabric->res_path, buffer,
							 sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

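/*
 * Example decode using the tables above: a config element whose
 * (cfg->link_rate & IPR_PHY_LINK_RATE_MASK) value is 8 logs as
 * "1.5Gbps" and 9 as "3.0Gbps"; any other code lands on one of the
 * "unknown" (or administrative, e.g. "disabled") strings in link_rate[].
 */
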
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(cfg->res_path, buffer,
							 sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

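/*
 * Worked example of the add_len arithmetic in ipr_log_fabric_error()
 * above, with hypothetical numbers: if be32_to_cpu(hostrcb->hcam.length)
 * is 0x200 and the two offsetof() terms sum to 0x40, add_len starts at
 * 0x1C0. Each fabric descriptor walked subtracts its own be16 length,
 * so whatever remains after the loop is unparsed trailing data, which
 * is dumped raw through ipr_log_hex_data().
 */
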
/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(array_entry->res_path, buffer,
					    sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(array_entry->expected_res_path,
					    buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async message (HCAM) from the adapter. It will log
 * the error and send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is now dead and we must report the failure. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 *	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

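/*
 * Worked example of the rate computation above (illustrative numbers):
 * for a wide (16-bit) bus and an SES table entry with
 * max_bus_speed_limit == 320 MB/sec,
 *
 *	(320 * 10) / (16 / 8) == 1600
 *
 * i.e. 160 MHz in the function's 100 KHz units, matching the
 * "max 160MHz = max 320MB/sec" note in the header comment.
 */
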
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}

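/*
 * Timing note for the loop above: delay doubles every pass
 * (1, 2, 4, ... microseconds), so for max_delay == 256 the function
 * busy-waits 1+2+4+...+128 == 255 us in total before the 256 us pass
 * fails the "delay < max_delay" test and it returns -EIO. The total
 * wait is therefore always on the order of max_delay itself.
 */
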
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 *	number of bytes copied
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}

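/*
 * Worked example of the page chunking above (assuming 4 KB pages):
 * with PAGE_SIZE == 4096, page_offset == 4000 and 1000 bytes left to
 * copy, rem_page_len == 96 and cur_len == min(1000, 96) == 96, so the
 * current page is topped off and the next iteration allocates a fresh
 * page for the remaining 904 bytes.
 */
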
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:		ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

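/*
 * Worked example of the fw_version packing above, using hypothetical
 * page 3 VPD bytes: major_release 0x02, card_type 0x00 and minor
 * release bytes 0x03 0x01 pack as
 *
 *	(0x02 << 24) | (0x00 << 16) | (0x03 << 8) | 0x01 == 0x02000301
 *
 * the same byte order that ipr_show_fw_version() later prints through
 * sysfs as "02000301".
 */
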
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:		ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:		ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:		ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	   lengths to gather the real dump data. sdt represents the pointer
	   to the ioa generated dump table. Dump data will be extracted based
	   on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif

/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	kfree(dump);
	LEAVE;
}

/**
 * ipr_worker_thread - Worker thread
 * @work:	work struct embedded in the ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
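
/*
 * Usage sketch for the trace attribute above (the exact sysfs path
 * depends on where the driver registers the bin file; the host number
 * is hypothetical):
 *
 *	dd if=/sys/class/scsi_host/host0/trace bs=4k
 *
 * memory_read_from_buffer() clips the offset and count, so partial
 * and repeated reads behave like reads of a regular file.
 */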

/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name = "fw_version",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

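/*
 * Usage sketch (host number and version value are hypothetical):
 *
 *	$ cat /sys/class/scsi_host/host0/fw_version
 *	02000301
 *
 * The attribute is read-only (S_IRUGO) and reflects the microcode
 * level from the page 3 inquiry VPD read in ipr_show_fw_version().
 */
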
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes consumed from the buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};

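/*
 * Usage sketch (host number and values are hypothetical):
 *
 *	$ cat /sys/class/scsi_host/host0/log_level
 *	2
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * Writes take effect immediately under the host lock; levels above
 * IPR_DEFAULT_LOG_LEVEL make routines such as ipr_log_hex_data()
 * dump full error buffers instead of truncating them.
 */
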
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name = "run_diagnostics",
		.mode = S_IWUSR,
	},
	.store = ipr_store_diagnostics
};

/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};

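/*
 * Example usage (illustrative): bring a dead adapter back online by
 * writing "online" to the attribute; the driver clears the dead state
 * and schedules a full adapter reset:
 *
 *	# echo online > /sys/class/scsi_host/host0/online_state
 */
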
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

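/*
 * Example usage (illustrative): any write triggers a normal-shutdown
 * adapter reset and blocks until the reset/reload completes:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 */
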
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;
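
	/*
	 * Worked example (illustrative, assuming 4K pages and
	 * IPR_MAX_SGLIST == 64): a 1 MB image gives sg_size of roughly
	 * 16.6 KB, which get_order() rounds up to order 3 (32 KB chunks),
	 * so bsize_elem = 32 KB and num_elem = 32 sg entries.
	 */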

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

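/*
 * Note (behavior inferred from the reset path): the microcode is not
 * written to the adapter here directly.  Setting ioa_cfg->ucode_sglist
 * and initiating a reset causes the reset job to issue the download to
 * the IOA before re-enabling it; this function only waits for that
 * reset to finish and then clears the pointer.
 */
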
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
		release_firmware(fw_entry);
		return -EINVAL;
	}

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};

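/*
 * Example usage (illustrative): write the name of a firmware image that
 * request_firmware() can locate (typically under /lib/firmware):
 *
 *	# echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */
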
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes read
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

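	/*
	 * The dump image is presented as three consecutive regions: the
	 * fixed-size driver dump structure, the IOA dump header up to
	 * its ioa_data member, and finally the page array that holds
	 * the IOA data itself.  Each block below copies out whatever
	 * part of the request falls inside its region, then rebases
	 * the offset for the next one.
	 */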
	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
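
/*
 * Example usage (illustrative): write '1' to arm dump collection, read
 * out the dump image once obtained, then write '0' to free the memory:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# cat /sys/class/scsi_host/host0/dump > ioa.dump
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */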
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       ipr_format_res_path(res->res_path, buffer,
						   sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_resource_path
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}

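/*
 * With 128 heads and 32 sectors/track, a cylinder is 4096 sectors
 * (2 MB with 512-byte sectors), so cylinder-aligned partitions created
 * by fdisk always start on a 4k boundary.
 */
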
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id) &&
		    (res->lun == 0)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->array_ids);
		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->vset_ids);
		else if (starget->channel == 0)
			clear_bit(starget->id, ioa_cfg->target_ids);
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port)
		rc = ata_sas_port_init(sata_port->ap);
	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset as a result of error recovery.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	unused
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset, which
	 * will then go to sleep and wait for the reset to complete.
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
{
	irqreturn_t rc = IRQ_HANDLED;
	volatile u32 int_reg, int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

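	/*
	 * The host request/response queue (HRRQ) is a circular buffer of
	 * response handles written by the adapter.  A toggle bit in each
	 * entry distinguishes new entries from stale ones: the adapter
	 * flips the bit it writes on every pass around the ring, and the
	 * driver flips ioa_cfg->toggle_bit whenever it wraps, so an entry
	 * is valid only while its toggle bit matches the driver's.
	 */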
	while (1) {
		ipr_cmd = NULL;

		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

5116/**
5117 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5118 * @scsi_cmd: scsi command struct
5119 *
5120 * Return value:
5121 * task attributes
5122 **/
5123static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5124{
5125 u8 tag[2];
5126 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5127
5128 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5129 switch (tag[0]) {
5130 case MSG_SIMPLE_TAG:
5131 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5132 break;
5133 case MSG_HEAD_TAG:
5134 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5135 break;
5136 case MSG_ORDERED_TAG:
5137 rc = IPR_FLAGS_LO_ORDERED_TASK;
5138 break;
 5139 }
5140 }
5141
5142 return rc;
5143}
5144
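/*
 * For example, a command tagged with MSG_SIMPLE_TAG by the mid-layer is
 * issued with IPR_FLAGS_LO_SIMPLE_TASK, while a command carrying no tag
 * message at all falls through to IPR_FLAGS_LO_UNTAGGED_TASK.
 */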
5145/**
5146 * ipr_erp_done - Process completion of ERP for a device
5147 * @ipr_cmd: ipr command struct
5148 *
5149 * This function copies the sense buffer into the scsi_cmd
 5150 * struct and calls the scsi_done function.
5151 *
5152 * Return value:
5153 * nothing
5154 **/
5155static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5156{
5157 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5158 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 5160 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5161
5162 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5163 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5164 scmd_printk(KERN_ERR, scsi_cmd,
5165 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5166 } else {
5167 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5168 SCSI_SENSE_BUFFERSIZE);
5169 }
5170
5171 if (res) {
ee0a90fa
BK
5172 if (!ipr_is_naca_model(res))
5173 res->needs_sync_complete = 1;
1da177e4
LT
5174 res->in_erp = 0;
5175 }
63015bc9 5176 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5177 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5178 scsi_cmd->scsi_done(scsi_cmd);
5179}
5180
5181/**
5182 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5183 * @ipr_cmd: ipr command struct
5184 *
5185 * Return value:
5186 * none
5187 **/
5188static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5189{
51b1c7e1 5190 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5191 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5192 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5193
5194 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5195 ioarcb->data_transfer_length = 0;
1da177e4 5196 ioarcb->read_data_transfer_length = 0;
a32c055f 5197 ioarcb->ioadl_len = 0;
1da177e4 5198 ioarcb->read_ioadl_len = 0;
96d21f00
WB
5199 ioasa->hdr.ioasc = 0;
5200 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
5201
5202 if (ipr_cmd->ioa_cfg->sis64)
5203 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5204 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5205 else {
5206 ioarcb->write_ioadl_addr =
5207 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5208 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5209 }
1da177e4
LT
5210}
5211
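/*
 * Only state a prior use could have dirtied is reset here: the command
 * packet, the transfer lengths, and the IOASC/residual in the IOASA.
 * The IOADL address is then re-pointed at the command block's own
 * descriptor area -- the 64-bit form on SIS64 adapters, the shared
 * read/write 32-bit form otherwise.
 */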
5212/**
5213 * ipr_erp_request_sense - Send request sense to a device
5214 * @ipr_cmd: ipr command struct
5215 *
5216 * This function sends a request sense to a device as a result
5217 * of a check condition.
5218 *
5219 * Return value:
5220 * nothing
5221 **/
5222static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5223{
5224 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5225 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5226
5227 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5228 ipr_erp_done(ipr_cmd);
5229 return;
5230 }
5231
5232 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5233
5234 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5235 cmd_pkt->cdb[0] = REQUEST_SENSE;
5236 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5237 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5238 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5239 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5240
a32c055f
WB
5241 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5242 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5243
5244 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5245 IPR_REQUEST_SENSE_TIMEOUT * 2);
5246}
5247
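/*
 * The REQUEST SENSE CDB is built by hand: cdb[4] holds the allocation
 * length (SCSI_SENSE_BUFFERSIZE) and a single IOADL entry points at the
 * command block's sense_buffer_dma area, so this internal op needs no
 * scatter/gather mapping of the original scsi_cmnd.
 */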
5248/**
5249 * ipr_erp_cancel_all - Send cancel all to a device
5250 * @ipr_cmd: ipr command struct
5251 *
5252 * This function sends a cancel all to a device to clear the
5253 * queue. If we are running TCQ on the device, QERR is set to 1,
5254 * which means all outstanding ops have been dropped on the floor.
5255 * Cancel all will return them to us.
5256 *
5257 * Return value:
5258 * nothing
5259 **/
5260static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5261{
5262 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5263 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5264 struct ipr_cmd_pkt *cmd_pkt;
5265
5266 res->in_erp = 1;
5267
5268 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5269
5270 if (!scsi_get_tag_type(scsi_cmd->device)) {
5271 ipr_erp_request_sense(ipr_cmd);
5272 return;
5273 }
5274
5275 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5276 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5277 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5278
5279 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5280 IPR_CANCEL_ALL_TIMEOUT);
5281}
5282
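/*
 * ERP therefore runs in up to two chained steps: a cancel-all to drain
 * the device queue (only when tagged queueing is active), whose
 * completion chains into ipr_erp_request_sense(), which in turn
 * completes through ipr_erp_done() back to the mid-layer.
 */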
5283/**
5284 * ipr_dump_ioasa - Dump contents of IOASA
5285 * @ioa_cfg: ioa config struct
5286 * @ipr_cmd: ipr command struct
fe964d0a 5287 * @res: resource entry struct
1da177e4
LT
5288 *
5289 * This function is invoked by the interrupt handler when ops
5290 * fail. It will log the IOASA if appropriate. Only called
5291 * for GPDD ops.
5292 *
5293 * Return value:
5294 * none
5295 **/
5296static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5297 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5298{
5299 int i;
5300 u16 data_len;
b0692dd4 5301 u32 ioasc, fd_ioasc;
96d21f00 5302 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
5303 __be32 *ioasa_data = (__be32 *)ioasa;
5304 int error_index;
5305
96d21f00
WB
5306 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5307 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5308
5309 if (0 == ioasc)
5310 return;
5311
5312 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5313 return;
5314
b0692dd4
BK
5315 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5316 error_index = ipr_get_error(fd_ioasc);
5317 else
5318 error_index = ipr_get_error(ioasc);
1da177e4
LT
5319
5320 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5321 /* Don't log an error if the IOA already logged one */
96d21f00 5322 if (ioasa->hdr.ilid != 0)
1da177e4
LT
5323 return;
5324
cc9bd5d4
BK
5325 if (!ipr_is_gscsi(res))
5326 return;
5327
1da177e4
LT
5328 if (ipr_error_table[error_index].log_ioasa == 0)
5329 return;
5330 }
5331
fe964d0a 5332 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5333
96d21f00
WB
5334 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5335 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5336 data_len = sizeof(struct ipr_ioasa64);
5337 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5338 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
5339
5340 ipr_err("IOASA Dump:\n");
5341
5342 for (i = 0; i < data_len / 4; i += 4) {
5343 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5344 be32_to_cpu(ioasa_data[i]),
5345 be32_to_cpu(ioasa_data[i+1]),
5346 be32_to_cpu(ioasa_data[i+2]),
5347 be32_to_cpu(ioasa_data[i+3]));
5348 }
5349}
5350
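/*
 * The dump prints the raw IOASA as big-endian words, four per row,
 * prefixed by the byte offset. An illustrative row (values made up):
 *
 *   IOASA Dump:
 *   00000000: 04448500 00000020 00000000 00000000
 */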
5351/**
5352 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 5353 * @ipr_cmd: ipr command struct
5355 *
5356 * Return value:
5357 * none
5358 **/
5359static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5360{
5361 u32 failing_lba;
5362 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5363 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
5364 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5365 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
5366
5367 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5368
5369 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5370 return;
5371
5372 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5373
5374 if (ipr_is_vset_device(res) &&
5375 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5376 ioasa->u.vset.failing_lba_hi != 0) {
5377 sense_buf[0] = 0x72;
5378 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5379 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5380 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5381
5382 sense_buf[7] = 12;
5383 sense_buf[8] = 0;
5384 sense_buf[9] = 0x0A;
5385 sense_buf[10] = 0x80;
5386
5387 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5388
5389 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5390 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5391 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5392 sense_buf[15] = failing_lba & 0x000000ff;
5393
5394 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5395
5396 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5397 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5398 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5399 sense_buf[19] = failing_lba & 0x000000ff;
5400 } else {
5401 sense_buf[0] = 0x70;
5402 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5403 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5404 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5405
5406 /* Illegal request */
5407 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 5408 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
5409 sense_buf[7] = 10; /* additional length */
5410
5411 /* IOARCB was in error */
5412 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5413 sense_buf[15] = 0xC0;
5414 else /* Parameter data was invalid */
5415 sense_buf[15] = 0x80;
5416
5417 sense_buf[16] =
5418 ((IPR_FIELD_POINTER_MASK &
96d21f00 5419 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
5420 sense_buf[17] =
5421 (IPR_FIELD_POINTER_MASK &
96d21f00 5422 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
5423 } else {
5424 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5425 if (ipr_is_vset_device(res))
5426 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5427 else
5428 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5429
5430 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5431 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5432 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5433 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5434 sense_buf[6] = failing_lba & 0x000000ff;
5435 }
5436
5437 sense_buf[7] = 6; /* additional length */
5438 }
5439 }
5440}
5441
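/*
 * Two standard sense formats are generated: descriptor format
 * (response code 0x72) when a vset device reports a failing LBA that
 * needs more than 32 bits, with the 64-bit LBA carried in an
 * information descriptor; otherwise fixed format (response code 0x70),
 * where a 32-bit failing LBA goes in the INFORMATION field (bytes 3-6)
 * with the Valid bit set, and illegal requests get a field pointer in
 * the sense-key-specific bytes.
 */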
ee0a90fa
BK
5442/**
5443 * ipr_get_autosense - Copy autosense data to sense buffer
5444 * @ipr_cmd: ipr command struct
5445 *
5446 * This function copies the autosense buffer to the buffer
5447 * in the scsi_cmd, if there is autosense available.
5448 *
5449 * Return value:
5450 * 1 if autosense was available / 0 if not
5451 **/
5452static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5453{
96d21f00
WB
5454 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5455 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 5456
96d21f00 5457 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
5458 return 0;
5459
96d21f00
WB
5460 if (ipr_cmd->ioa_cfg->sis64)
5461 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5462 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5463 SCSI_SENSE_BUFFERSIZE));
5464 else
5465 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5466 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5467 SCSI_SENSE_BUFFERSIZE));
ee0a90fa
BK
5468 return 1;
5469}
5470
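/*
 * When the IOA flags autosense valid, the sense data is copied straight
 * out of the IOASA (64-bit layout on SIS64, 32-bit otherwise), clamped
 * to SCSI_SENSE_BUFFERSIZE, sparing the driver a separate REQUEST SENSE
 * round trip.
 */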
1da177e4
LT
5471/**
5472 * ipr_erp_start - Process an error response for a SCSI op
5473 * @ioa_cfg: ioa config struct
5474 * @ipr_cmd: ipr command struct
5475 *
5476 * This function determines whether or not to initiate ERP
5477 * on the affected device.
5478 *
5479 * Return value:
5480 * nothing
5481 **/
5482static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5483 struct ipr_cmnd *ipr_cmd)
5484{
5485 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5486 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5487 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 5488 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5489
5490 if (!res) {
5491 ipr_scsi_eh_done(ipr_cmd);
5492 return;
5493 }
5494
8a048994 5495 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
5496 ipr_gen_sense(ipr_cmd);
5497
cc9bd5d4
BK
5498 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5499
8a048994 5500 switch (masked_ioasc) {
1da177e4 5501 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
5502 if (ipr_is_naca_model(res))
5503 scsi_cmd->result |= (DID_ABORT << 16);
5504 else
5505 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
5506 break;
5507 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 5508 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
5509 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5510 break;
5511 case IPR_IOASC_HW_SEL_TIMEOUT:
5512 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
5513 if (!ipr_is_naca_model(res))
5514 res->needs_sync_complete = 1;
1da177e4
LT
5515 break;
5516 case IPR_IOASC_SYNC_REQUIRED:
5517 if (!res->in_erp)
5518 res->needs_sync_complete = 1;
5519 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5520 break;
5521 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 5522 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
5523 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5524 break;
5525 case IPR_IOASC_BUS_WAS_RESET:
5526 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5527 /*
5528 * Report the bus reset and ask for a retry. The device
5529 * will give CC/UA the next command.
5530 */
5531 if (!res->resetting_device)
5532 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5533 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
5534 if (!ipr_is_naca_model(res))
5535 res->needs_sync_complete = 1;
1da177e4
LT
5536 break;
5537 case IPR_IOASC_HW_DEV_BUS_STATUS:
5538 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5539 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
5540 if (!ipr_get_autosense(ipr_cmd)) {
5541 if (!ipr_is_naca_model(res)) {
5542 ipr_erp_cancel_all(ipr_cmd);
5543 return;
5544 }
5545 }
1da177e4 5546 }
ee0a90fa
BK
5547 if (!ipr_is_naca_model(res))
5548 res->needs_sync_complete = 1;
1da177e4
LT
5549 break;
5550 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5551 break;
5552 default:
5b7304fb
BK
5553 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5554 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5555 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
5556 res->needs_sync_complete = 1;
5557 break;
5558 }
5559
63015bc9 5560 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5561 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5562 scsi_cmd->scsi_done(scsi_cmd);
5563}
5564
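/*
 * In short, the masked IOASC selects the disposition: some values map
 * straight to mid-layer results (DID_NO_CONNECT, DID_IMM_RETRY,
 * DID_ERROR), a check condition without valid autosense on a non-NACA
 * device escalates into the cancel-all/request-sense ERP chain above,
 * and most paths also arm a SYNC COMPLETE for the device's next
 * command.
 */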
5565/**
5566 * ipr_scsi_done - mid-layer done function
5567 * @ipr_cmd: ipr command struct
5568 *
5569 * This function is invoked by the interrupt handler for
5570 * ops generated by the SCSI mid-layer
5571 *
5572 * Return value:
5573 * none
5574 **/
5575static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5576{
5577 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5578 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 5579 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4 5580
96d21f00 5581 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
5582
5583 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
63015bc9 5584 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5585 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5586 scsi_cmd->scsi_done(scsi_cmd);
5587 } else
5588 ipr_erp_start(ioa_cfg, ipr_cmd);
5589}
5590
1da177e4
LT
5591/**
5592 * ipr_queuecommand - Queue a mid-layer request
5593 * @scsi_cmd: scsi command struct
5594 * @done: done function
5595 *
5596 * This function queues a request generated by the mid-layer.
5597 *
5598 * Return value:
5599 * 0 on success
5600 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5601 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5602 **/
5603static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5604 void (*done) (struct scsi_cmnd *))
5605{
5606 struct ipr_ioa_cfg *ioa_cfg;
5607 struct ipr_resource_entry *res;
5608 struct ipr_ioarcb *ioarcb;
5609 struct ipr_cmnd *ipr_cmd;
5610 int rc = 0;
5611
5612 scsi_cmd->scsi_done = done;
5613 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5614 res = scsi_cmd->device->hostdata;
5615 scsi_cmd->result = (DID_OK << 16);
5616
5617 /*
 5618 * We are currently blocking all devices due to a host reset.
5619 * We have told the host to stop giving us new requests, but
5620 * ERP ops don't count. FIXME
5621 */
5622 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5623 return SCSI_MLQUEUE_HOST_BUSY;
5624
5625 /*
5626 * FIXME - Create scsi_set_host_offline interface
5627 * and the ioa_is_dead check can be removed
5628 */
5629 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5630 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5631 scsi_cmd->result = (DID_NO_CONNECT << 16);
5632 scsi_cmd->scsi_done(scsi_cmd);
5633 return 0;
5634 }
5635
35a39691
BK
5636 if (ipr_is_gata(res) && res->sata_port)
5637 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5638
1da177e4
LT
5639 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5640 ioarcb = &ipr_cmd->ioarcb;
5641 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5642
5643 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5644 ipr_cmd->scsi_cmd = scsi_cmd;
3e7ebdfa 5645 ioarcb->res_handle = res->res_handle;
1da177e4 5646 ipr_cmd->done = ipr_scsi_done;
3e7ebdfa 5647 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
1da177e4
LT
5648
5649 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5650 if (scsi_cmd->underflow == 0)
5651 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5652
5653 if (res->needs_sync_complete) {
5654 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5655 res->needs_sync_complete = 0;
5656 }
5657
5658 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5659 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5660 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5661 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5662 }
5663
5664 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5665 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5666 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5667
a32c055f
WB
5668 if (likely(rc == 0)) {
5669 if (ioa_cfg->sis64)
5670 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5671 else
5672 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5673 }
1da177e4
LT
5674
5675 if (likely(rc == 0)) {
5676 mb();
a32c055f 5677 ipr_send_command(ipr_cmd);
1da177e4
LT
5678 } else {
5679 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5680 return SCSI_MLQUEUE_HOST_BUSY;
5681 }
5682
5683 return 0;
5684}
5685
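/*
 * The mb() ahead of ipr_send_command() is the ordering point: every
 * IOARCB and IOADL store must be visible before the MMIO write that
 * hands the command to the adapter, or the IOA could fetch a
 * half-built request.
 */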
35a39691
BK
5686/**
5687 * ipr_ioctl - IOCTL handler
5688 * @sdev: scsi device struct
5689 * @cmd: IOCTL cmd
5690 * @arg: IOCTL arg
5691 *
5692 * Return value:
5693 * 0 on success / other on failure
5694 **/
bd705f2d 5695static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
5696{
5697 struct ipr_resource_entry *res;
5698
5699 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
5700 if (res && ipr_is_gata(res)) {
5701 if (cmd == HDIO_GET_IDENTITY)
5702 return -ENOTTY;
94be9a58 5703 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 5704 }
35a39691
BK
5705
5706 return -EINVAL;
5707}
5708
1da177e4
LT
5709/**
 5710 * ipr_ioa_info - Get information about the card/driver
 5711 * @host: scsi host struct
5712 *
5713 * Return value:
5714 * pointer to buffer with description string
5715 **/
5716static const char * ipr_ioa_info(struct Scsi_Host *host)
5717{
5718 static char buffer[512];
5719 struct ipr_ioa_cfg *ioa_cfg;
5720 unsigned long lock_flags = 0;
5721
5722 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5723
5724 spin_lock_irqsave(host->host_lock, lock_flags);
5725 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5726 spin_unlock_irqrestore(host->host_lock, lock_flags);
5727
5728 return buffer;
5729}
5730
5731static struct scsi_host_template driver_template = {
5732 .module = THIS_MODULE,
5733 .name = "IPR",
5734 .info = ipr_ioa_info,
35a39691 5735 .ioctl = ipr_ioctl,
1da177e4
LT
5736 .queuecommand = ipr_queuecommand,
5737 .eh_abort_handler = ipr_eh_abort,
5738 .eh_device_reset_handler = ipr_eh_dev_reset,
5739 .eh_host_reset_handler = ipr_eh_host_reset,
5740 .slave_alloc = ipr_slave_alloc,
5741 .slave_configure = ipr_slave_configure,
5742 .slave_destroy = ipr_slave_destroy,
35a39691
BK
5743 .target_alloc = ipr_target_alloc,
5744 .target_destroy = ipr_target_destroy,
1da177e4
LT
5745 .change_queue_depth = ipr_change_queue_depth,
5746 .change_queue_type = ipr_change_queue_type,
5747 .bios_param = ipr_biosparam,
5748 .can_queue = IPR_MAX_COMMANDS,
5749 .this_id = -1,
5750 .sg_tablesize = IPR_MAX_SGLIST,
5751 .max_sectors = IPR_IOA_MAX_SECTORS,
5752 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5753 .use_clustering = ENABLE_CLUSTERING,
5754 .shost_attrs = ipr_ioa_attrs,
5755 .sdev_attrs = ipr_dev_attrs,
5756 .proc_name = IPR_NAME
5757};
5758
35a39691
BK
5759/**
5760 * ipr_ata_phy_reset - libata phy_reset handler
5761 * @ap: ata port to reset
5762 *
5763 **/
5764static void ipr_ata_phy_reset(struct ata_port *ap)
5765{
5766 unsigned long flags;
5767 struct ipr_sata_port *sata_port = ap->private_data;
5768 struct ipr_resource_entry *res = sata_port->res;
5769 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5770 int rc;
5771
5772 ENTER;
5773 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 5774 while (ioa_cfg->in_reset_reload) {
5775 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5776 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5777 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5778 }
5779
5780 if (!ioa_cfg->allow_cmds)
5781 goto out_unlock;
5782
5783 rc = ipr_device_reset(ioa_cfg, res);
5784
5785 if (rc) {
3e4ec344 5786 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
5787 goto out_unlock;
5788 }
5789
3e7ebdfa
WB
5790 ap->link.device[0].class = res->ata_class;
5791 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 5792 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
5793
5794out_unlock:
5795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5796 LEAVE;
5797}
5798
5799/**
5800 * ipr_ata_post_internal - Cleanup after an internal command
5801 * @qc: ATA queued command
5802 *
5803 * Return value:
5804 * none
5805 **/
5806static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5807{
5808 struct ipr_sata_port *sata_port = qc->ap->private_data;
5809 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5810 struct ipr_cmnd *ipr_cmd;
5811 unsigned long flags;
5812
5813 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
73d98ff0
BK
 5814 while (ioa_cfg->in_reset_reload) {
5815 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5816 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5817 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5818 }
5819
35a39691
BK
5820 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5821 if (ipr_cmd->qc == qc) {
5822 ipr_device_reset(ioa_cfg, sata_port->res);
5823 break;
5824 }
5825 }
5826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5827}
5828
35a39691
BK
5829/**
5830 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5831 * @regs: destination
5832 * @tf: source ATA taskfile
5833 *
5834 * Return value:
5835 * none
5836 **/
5837static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5838 struct ata_taskfile *tf)
5839{
5840 regs->feature = tf->feature;
5841 regs->nsect = tf->nsect;
5842 regs->lbal = tf->lbal;
5843 regs->lbam = tf->lbam;
5844 regs->lbah = tf->lbah;
5845 regs->device = tf->device;
5846 regs->command = tf->command;
5847 regs->hob_feature = tf->hob_feature;
5848 regs->hob_nsect = tf->hob_nsect;
5849 regs->hob_lbal = tf->hob_lbal;
5850 regs->hob_lbam = tf->hob_lbam;
5851 regs->hob_lbah = tf->hob_lbah;
5852 regs->ctl = tf->ctl;
5853}
5854
5855/**
5856 * ipr_sata_done - done function for SATA commands
5857 * @ipr_cmd: ipr command struct
5858 *
5859 * This function is invoked by the interrupt handler for
5860 * ops generated by the SCSI mid-layer to SATA devices
5861 *
5862 * Return value:
5863 * none
5864 **/
5865static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5866{
5867 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5868 struct ata_queued_cmd *qc = ipr_cmd->qc;
5869 struct ipr_sata_port *sata_port = qc->ap->private_data;
5870 struct ipr_resource_entry *res = sata_port->res;
96d21f00 5871 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 5872
96d21f00
WB
5873 if (ipr_cmd->ioa_cfg->sis64)
5874 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5875 sizeof(struct ipr_ioasa_gata));
5876 else
5877 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5878 sizeof(struct ipr_ioasa_gata));
35a39691
BK
5879 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5880
96d21f00 5881 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 5882 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
5883
5884 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 5885 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 5886 else
96d21f00 5887 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
35a39691
BK
5888 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5889 ata_qc_complete(qc);
5890}
5891
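/*
 * The ATA status byte from the IOASA drives libata's error handling:
 * __ac_err_mask() is used when the IOASC indicates a real failure (it
 * never returns zero), while ac_err_mask() is used otherwise so that a
 * clean status contributes no error bits.
 */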
a32c055f
WB
5892/**
5893 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5894 * @ipr_cmd: ipr command struct
5895 * @qc: ATA queued command
5896 *
5897 **/
5898static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5899 struct ata_queued_cmd *qc)
5900{
5901 u32 ioadl_flags = 0;
5902 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5903 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5904 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5905 int len = qc->nbytes;
5906 struct scatterlist *sg;
5907 unsigned int si;
5908 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5909
5910 if (len == 0)
5911 return;
5912
5913 if (qc->dma_dir == DMA_TO_DEVICE) {
5914 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5915 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5916 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5917 ioadl_flags = IPR_IOADL_FLAGS_READ;
5918
5919 ioarcb->data_transfer_length = cpu_to_be32(len);
5920 ioarcb->ioadl_len =
5921 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5922 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5923 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5924
5925 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5926 ioadl64->flags = cpu_to_be32(ioadl_flags);
5927 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5928 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5929
5930 last_ioadl64 = ioadl64;
5931 ioadl64++;
5932 }
5933
5934 if (likely(last_ioadl64))
5935 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5936}
5937
35a39691
BK
5938/**
5939 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5940 * @ipr_cmd: ipr command struct
5941 * @qc: ATA queued command
5942 *
5943 **/
5944static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5945 struct ata_queued_cmd *qc)
5946{
5947 u32 ioadl_flags = 0;
5948 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5949 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 5950 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 5951 int len = qc->nbytes;
35a39691 5952 struct scatterlist *sg;
ff2aeb1e 5953 unsigned int si;
35a39691
BK
5954
5955 if (len == 0)
5956 return;
5957
5958 if (qc->dma_dir == DMA_TO_DEVICE) {
5959 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5960 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5961 ioarcb->data_transfer_length = cpu_to_be32(len);
5962 ioarcb->ioadl_len =
35a39691
BK
5963 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5964 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5965 ioadl_flags = IPR_IOADL_FLAGS_READ;
5966 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5967 ioarcb->read_ioadl_len =
5968 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5969 }
5970
ff2aeb1e 5971 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
5972 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5973 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
5974
5975 last_ioadl = ioadl;
5976 ioadl++;
35a39691 5977 }
3be6cbd7
JG
5978
5979 if (likely(last_ioadl))
5980 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
5981}
5982
5983/**
5984 * ipr_qc_issue - Issue a SATA qc to a device
5985 * @qc: queued command
5986 *
5987 * Return value:
5988 * 0 if success
5989 **/
5990static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5991{
5992 struct ata_port *ap = qc->ap;
5993 struct ipr_sata_port *sata_port = ap->private_data;
5994 struct ipr_resource_entry *res = sata_port->res;
5995 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5996 struct ipr_cmnd *ipr_cmd;
5997 struct ipr_ioarcb *ioarcb;
5998 struct ipr_ioarcb_ata_regs *regs;
5999
6000 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
0feeed82 6001 return AC_ERR_SYSTEM;
35a39691
BK
6002
6003 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6004 ioarcb = &ipr_cmd->ioarcb;
35a39691 6005
a32c055f
WB
6006 if (ioa_cfg->sis64) {
6007 regs = &ipr_cmd->i.ata_ioadl.regs;
6008 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6009 } else
6010 regs = &ioarcb->u.add_data.u.regs;
6011
6012 memset(regs, 0, sizeof(*regs));
6013 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691
BK
6014
6015 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6016 ipr_cmd->qc = qc;
6017 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6018 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6019 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6020 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6021 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6022 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6023
a32c055f
WB
6024 if (ioa_cfg->sis64)
6025 ipr_build_ata_ioadl64(ipr_cmd, qc);
6026 else
6027 ipr_build_ata_ioadl(ipr_cmd, qc);
6028
35a39691
BK
6029 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6030 ipr_copy_sata_tf(regs, &qc->tf);
6031 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6032 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6033
6034 switch (qc->tf.protocol) {
6035 case ATA_PROT_NODATA:
6036 case ATA_PROT_PIO:
6037 break;
6038
6039 case ATA_PROT_DMA:
6040 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6041 break;
6042
0dc36888
TH
6043 case ATAPI_PROT_PIO:
6044 case ATAPI_PROT_NODATA:
35a39691
BK
6045 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6046 break;
6047
0dc36888 6048 case ATAPI_PROT_DMA:
35a39691
BK
6049 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6050 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6051 break;
6052
6053 default:
6054 WARN_ON(1);
0feeed82 6055 return AC_ERR_INVALID;
35a39691
BK
6056 }
6057
6058 mb();
a32c055f
WB
6059
6060 ipr_send_command(ipr_cmd);
6061
35a39691
BK
6062 return 0;
6063}
6064
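/*
 * The taskfile protocol maps onto IOARCB flags: DMA protocols add
 * IPR_ATA_FLAG_XFER_TYPE_DMA, ATAPI protocols add
 * IPR_ATA_FLAG_PACKET_CMD (ATAPI DMA adds both), and PIO/NODATA need
 * nothing extra; any other protocol is a programming error, hence the
 * WARN_ON(1).
 */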
4c9bf4e7
TH
6065/**
6066 * ipr_qc_fill_rtf - Read result TF
6067 * @qc: ATA queued command
6068 *
6069 * Return value:
6070 * true
6071 **/
6072static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6073{
6074 struct ipr_sata_port *sata_port = qc->ap->private_data;
6075 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6076 struct ata_taskfile *tf = &qc->result_tf;
6077
6078 tf->feature = g->error;
6079 tf->nsect = g->nsect;
6080 tf->lbal = g->lbal;
6081 tf->lbam = g->lbam;
6082 tf->lbah = g->lbah;
6083 tf->device = g->device;
6084 tf->command = g->status;
6085 tf->hob_nsect = g->hob_nsect;
6086 tf->hob_lbal = g->hob_lbal;
6087 tf->hob_lbam = g->hob_lbam;
6088 tf->hob_lbah = g->hob_lbah;
6089 tf->ctl = g->alt_status;
6090
6091 return true;
6092}
6093
35a39691 6094static struct ata_port_operations ipr_sata_ops = {
35a39691 6095 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6096 .hardreset = ipr_sata_reset,
35a39691 6097 .post_internal_cmd = ipr_ata_post_internal,
35a39691
BK
6098 .qc_prep = ata_noop_qc_prep,
6099 .qc_issue = ipr_qc_issue,
4c9bf4e7 6100 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6101 .port_start = ata_sas_port_start,
6102 .port_stop = ata_sas_port_stop
6103};
6104
6105static struct ata_port_info sata_port_info = {
6106 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6107 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6108 .pio_mask = 0x10, /* pio4 */
6109 .mwdma_mask = 0x07,
6110 .udma_mask = 0x7f, /* udma0-6 */
6111 .port_ops = &ipr_sata_ops
6112};
6113
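/*
 * The transfer-mode fields are standard libata bitmaps: 0x10 advertises
 * PIO4 only, 0x07 advertises MWDMA0-2, and 0x7f advertises UDMA0-6.
 */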
1da177e4
LT
6114#ifdef CONFIG_PPC_PSERIES
6115static const u16 ipr_blocked_processors[] = {
6116 PV_NORTHSTAR,
6117 PV_PULSAR,
6118 PV_POWER4,
6119 PV_ICESTAR,
6120 PV_SSTAR,
6121 PV_POWER4p,
6122 PV_630,
6123 PV_630p
6124};
6125
6126/**
6127 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6128 * @ioa_cfg: ioa cfg struct
6129 *
6130 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6131 * certain pSeries hardware. This function determines if the given
 6132 * adapter is in one of these configurations or not.
6133 *
6134 * Return value:
6135 * 1 if adapter is not supported / 0 if adapter is supported
6136 **/
6137static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6138{
1da177e4
LT
6139 int i;
6140
44c10138
AK
6141 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6142 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6143 if (__is_processor(ipr_blocked_processors[i]))
6144 return 1;
1da177e4
LT
6145 }
6146 }
6147 return 0;
6148}
6149#else
6150#define ipr_invalid_adapter(ioa_cfg) 0
6151#endif
6152
6153/**
6154 * ipr_ioa_bringdown_done - IOA bring down completion.
6155 * @ipr_cmd: ipr command struct
6156 *
6157 * This function processes the completion of an adapter bring down.
6158 * It wakes any reset sleepers.
6159 *
6160 * Return value:
6161 * IPR_RC_JOB_RETURN
6162 **/
6163static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6164{
6165 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6166
6167 ENTER;
6168 ioa_cfg->in_reset_reload = 0;
6169 ioa_cfg->reset_retries = 0;
6170 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6171 wake_up_all(&ioa_cfg->reset_wait_q);
6172
6173 spin_unlock_irq(ioa_cfg->host->host_lock);
6174 scsi_unblock_requests(ioa_cfg->host);
6175 spin_lock_irq(ioa_cfg->host->host_lock);
6176 LEAVE;
6177
6178 return IPR_RC_JOB_RETURN;
6179}
6180
6181/**
6182 * ipr_ioa_reset_done - IOA reset completion.
6183 * @ipr_cmd: ipr command struct
6184 *
6185 * This function processes the completion of an adapter reset.
6186 * It schedules any necessary mid-layer add/removes and
6187 * wakes any reset sleepers.
6188 *
6189 * Return value:
6190 * IPR_RC_JOB_RETURN
6191 **/
6192static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6193{
6194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6195 struct ipr_resource_entry *res;
6196 struct ipr_hostrcb *hostrcb, *temp;
6197 int i = 0;
6198
6199 ENTER;
6200 ioa_cfg->in_reset_reload = 0;
6201 ioa_cfg->allow_cmds = 1;
6202 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6203 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6204
6205 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6206 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6207 ipr_trace;
6208 break;
6209 }
6210 }
6211 schedule_work(&ioa_cfg->work_q);
6212
6213 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6214 list_del(&hostrcb->queue);
6215 if (i++ < IPR_NUM_LOG_HCAMS)
6216 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6217 else
6218 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6219 }
6220
6bb04170 6221 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6222 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6223
6224 ioa_cfg->reset_retries = 0;
6225 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6226 wake_up_all(&ioa_cfg->reset_wait_q);
6227
30237853 6228 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6229 scsi_unblock_requests(ioa_cfg->host);
30237853 6230 spin_lock(ioa_cfg->host->host_lock);
1da177e4
LT
6231
6232 if (!ioa_cfg->allow_cmds)
6233 scsi_block_requests(ioa_cfg->host);
6234
6235 LEAVE;
6236 return IPR_RC_JOB_RETURN;
6237}
6238
6239/**
6240 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6241 * @supported_dev: supported device struct
6242 * @vpids: vendor product id struct
6243 *
6244 * Return value:
6245 * none
6246 **/
6247static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6248 struct ipr_std_inq_vpids *vpids)
6249{
6250 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6251 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6252 supported_dev->num_records = 1;
6253 supported_dev->data_length =
6254 cpu_to_be16(sizeof(struct ipr_supported_device));
6255 supported_dev->reserved = 0;
6256}
6257
6258/**
6259 * ipr_set_supported_devs - Send Set Supported Devices for a device
6260 * @ipr_cmd: ipr command struct
6261 *
a32c055f 6262 * This function sends a Set Supported Devices command to the adapter.
1da177e4
LT
6263 *
6264 * Return value:
6265 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6266 **/
6267static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6268{
6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6270 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6271 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6272 struct ipr_resource_entry *res = ipr_cmd->u.res;
6273
6274 ipr_cmd->job_step = ipr_ioa_reset_done;
6275
6276 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6277 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6278 continue;
6279
6280 ipr_cmd->u.res = res;
3e7ebdfa 6281 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6282
6283 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6284 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6285 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6286
6287 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6288 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6289 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6290 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6291
a32c055f
WB
6292 ipr_init_ioadl(ipr_cmd,
6293 ioa_cfg->vpd_cbs_dma +
6294 offsetof(struct ipr_misc_cbs, supp_dev),
6295 sizeof(struct ipr_supported_device),
6296 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6297
6298 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6299 IPR_SET_SUP_DEVICE_TIMEOUT);
6300
3e7ebdfa
WB
6301 if (!ioa_cfg->sis64)
6302 ipr_cmd->job_step = ipr_set_supported_devs;
1da177e4
LT
6303 return IPR_RC_JOB_RETURN;
6304 }
6305
6306 return IPR_RC_JOB_CONTINUE;
6307}
6308
6309/**
6310 * ipr_get_mode_page - Locate specified mode page
6311 * @mode_pages: mode page buffer
6312 * @page_code: page code to find
6313 * @len: minimum required length for mode page
6314 *
6315 * Return value:
6316 * pointer to mode page / NULL on failure
6317 **/
6318static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6319 u32 page_code, u32 len)
6320{
6321 struct ipr_mode_page_hdr *mode_hdr;
6322 u32 page_length;
6323 u32 length;
6324
6325 if (!mode_pages || (mode_pages->hdr.length == 0))
6326 return NULL;
6327
6328 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6329 mode_hdr = (struct ipr_mode_page_hdr *)
6330 (mode_pages->data + mode_pages->hdr.block_desc_len);
6331
6332 while (length) {
6333 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6334 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6335 return mode_hdr;
6336 break;
6337 } else {
6338 page_length = (sizeof(struct ipr_mode_page_hdr) +
6339 mode_hdr->page_length);
6340 length -= page_length;
6341 mode_hdr = (struct ipr_mode_page_hdr *)
6342 ((unsigned long)mode_hdr + page_length);
6343 }
6344 }
6345 return NULL;
6346}
6347
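/*
 * The walk arithmetic: hdr.length is the MODE SENSE length byte, so
 * (length + 1) is the total data size; subtracting the 4-byte header
 * and any block descriptors leaves just the page data, which is then
 * scanned header by header, using each page_length to hop to the next
 * page.
 */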
6348/**
6349 * ipr_check_term_power - Check for term power errors
6350 * @ioa_cfg: ioa config struct
6351 * @mode_pages: IOAFP mode pages buffer
6352 *
6353 * Check the IOAFP's mode page 28 for term power errors
6354 *
6355 * Return value:
6356 * nothing
6357 **/
6358static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6359 struct ipr_mode_pages *mode_pages)
6360{
6361 int i;
6362 int entry_length;
6363 struct ipr_dev_bus_entry *bus;
6364 struct ipr_mode_page28 *mode_page;
6365
6366 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6367 sizeof(struct ipr_mode_page28));
6368
6369 entry_length = mode_page->entry_length;
6370
6371 bus = mode_page->bus;
6372
6373 for (i = 0; i < mode_page->num_entries; i++) {
6374 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6375 dev_err(&ioa_cfg->pdev->dev,
6376 "Term power is absent on scsi bus %d\n",
6377 bus->res_addr.bus);
6378 }
6379
6380 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6381 }
6382}
6383
6384/**
6385 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6386 * @ioa_cfg: ioa config struct
6387 *
6388 * Looks through the config table checking for SES devices. If
6389 * the SES device is in the SES table indicating a maximum SCSI
6390 * bus speed, the speed is limited for the bus.
6391 *
6392 * Return value:
6393 * none
6394 **/
6395static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6396{
6397 u32 max_xfer_rate;
6398 int i;
6399
6400 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6401 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6402 ioa_cfg->bus_attr[i].bus_width);
6403
6404 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6405 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6406 }
6407}
6408
6409/**
6410 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6411 * @ioa_cfg: ioa config struct
6412 * @mode_pages: mode page 28 buffer
6413 *
6414 * Updates mode page 28 based on driver configuration
6415 *
6416 * Return value:
6417 * none
6418 **/
6419static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6420 struct ipr_mode_pages *mode_pages)
6421{
6422 int i, entry_length;
6423 struct ipr_dev_bus_entry *bus;
6424 struct ipr_bus_attributes *bus_attr;
6425 struct ipr_mode_page28 *mode_page;
6426
6427 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6428 sizeof(struct ipr_mode_page28));
6429
6430 entry_length = mode_page->entry_length;
6431
6432 /* Loop for each device bus entry */
6433 for (i = 0, bus = mode_page->bus;
6434 i < mode_page->num_entries;
6435 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6436 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6437 dev_err(&ioa_cfg->pdev->dev,
6438 "Invalid resource address reported: 0x%08X\n",
6439 IPR_GET_PHYS_LOC(bus->res_addr));
6440 continue;
6441 }
6442
6443 bus_attr = &ioa_cfg->bus_attr[i];
6444 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6445 bus->bus_width = bus_attr->bus_width;
6446 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6447 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6448 if (bus_attr->qas_enabled)
6449 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6450 else
6451 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6452 }
6453}
6454
6455/**
6456 * ipr_build_mode_select - Build a mode select command
6457 * @ipr_cmd: ipr command struct
6458 * @res_handle: resource handle to send command to
 6459 * @parm: Byte 1 of the Mode Select CDB
6460 * @dma_addr: DMA buffer address
6461 * @xfer_len: data transfer length
6462 *
6463 * Return value:
6464 * none
6465 **/
6466static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
6467 __be32 res_handle, u8 parm,
6468 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6469{
1da177e4
LT
6470 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6471
6472 ioarcb->res_handle = res_handle;
6473 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6474 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6475 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6476 ioarcb->cmd_pkt.cdb[1] = parm;
6477 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6478
a32c055f 6479 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6480}
6481
6482/**
6483 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6484 * @ipr_cmd: ipr command struct
6485 *
6486 * This function sets up the SCSI bus attributes and sends
6487 * a Mode Select for Page 28 to activate them.
6488 *
6489 * Return value:
6490 * IPR_RC_JOB_RETURN
6491 **/
6492static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6493{
6494 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6495 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6496 int length;
6497
6498 ENTER;
4733804c
BK
6499 ipr_scsi_bus_speed_limit(ioa_cfg);
6500 ipr_check_term_power(ioa_cfg, mode_pages);
6501 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6502 length = mode_pages->hdr.length + 1;
6503 mode_pages->hdr.length = 0;
1da177e4
LT
6504
6505 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6506 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6507 length);
6508
f72919ec
WB
6509 ipr_cmd->job_step = ipr_set_supported_devs;
6510 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6511 struct ipr_resource_entry, queue);
1da177e4
LT
6512 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6513
6514 LEAVE;
6515 return IPR_RC_JOB_RETURN;
6516}
6517
6518/**
6519 * ipr_build_mode_sense - Builds a mode sense command
6520 * @ipr_cmd: ipr command struct
 6521 * @res_handle: resource handle to send command to
6522 * @parm: Byte 2 of mode sense command
6523 * @dma_addr: DMA address of mode sense buffer
6524 * @xfer_len: Size of DMA buffer
6525 *
6526 * Return value:
6527 * none
6528 **/
6529static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6530 __be32 res_handle,
a32c055f 6531 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6532{
1da177e4
LT
6533 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6534
6535 ioarcb->res_handle = res_handle;
6536 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6537 ioarcb->cmd_pkt.cdb[2] = parm;
6538 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6539 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6540
a32c055f 6541 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6542}
6543
dfed823e
BK
6544/**
6545 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6546 * @ipr_cmd: ipr command struct
6547 *
6548 * This function handles the failure of an IOA bringup command.
6549 *
6550 * Return value:
6551 * IPR_RC_JOB_RETURN
6552 **/
6553static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6554{
6555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 6556 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
6557
6558 dev_err(&ioa_cfg->pdev->dev,
6559 "0x%02X failed with IOASC: 0x%08X\n",
6560 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6561
6562 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6563 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6564 return IPR_RC_JOB_RETURN;
6565}
6566
6567/**
6568 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6569 * @ipr_cmd: ipr command struct
6570 *
6571 * This function handles the failure of a Mode Sense to the IOAFP.
6572 * Some adapters do not handle all mode pages.
6573 *
6574 * Return value:
6575 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6576 **/
6577static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6578{
f72919ec 6579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 6580 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
6581
6582 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
6583 ipr_cmd->job_step = ipr_set_supported_devs;
6584 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6585 struct ipr_resource_entry, queue);
dfed823e
BK
6586 return IPR_RC_JOB_CONTINUE;
6587 }
6588
6589 return ipr_reset_cmd_failed(ipr_cmd);
6590}
6591
1da177e4
LT
6592/**
6593 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6594 * @ipr_cmd: ipr command struct
6595 *
 6596 * This function sends a Page 28 mode sense to the IOA to
6597 * retrieve SCSI bus attributes.
6598 *
6599 * Return value:
6600 * IPR_RC_JOB_RETURN
6601 **/
6602static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6603{
6604 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6605
6606 ENTER;
6607 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6608 0x28, ioa_cfg->vpd_cbs_dma +
6609 offsetof(struct ipr_misc_cbs, mode_pages),
6610 sizeof(struct ipr_mode_pages));
6611
6612 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 6613 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
6614
6615 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6616
6617 LEAVE;
6618 return IPR_RC_JOB_RETURN;
6619}
6620
ac09c349
BK
6621/**
6622 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6623 * @ipr_cmd: ipr command struct
6624 *
6625 * This function enables dual IOA RAID support if possible.
6626 *
6627 * Return value:
6628 * IPR_RC_JOB_RETURN
6629 **/
6630static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6631{
6632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6633 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6634 struct ipr_mode_page24 *mode_page;
6635 int length;
6636
6637 ENTER;
6638 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6639 sizeof(struct ipr_mode_page24));
6640
6641 if (mode_page)
6642 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6643
6644 length = mode_pages->hdr.length + 1;
6645 mode_pages->hdr.length = 0;
6646
6647 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6648 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6649 length);
6650
6651 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6652 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6653
6654 LEAVE;
6655 return IPR_RC_JOB_RETURN;
6656}
6657
6658/**
6659 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6660 * @ipr_cmd: ipr command struct
6661 *
6662 * This function handles the failure of a Mode Sense to the IOAFP.
6663 * Some adapters do not handle all mode pages.
6664 *
6665 * Return value:
6666 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6667 **/
6668static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6669{
96d21f00 6670 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
6671
6672 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6673 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6674 return IPR_RC_JOB_CONTINUE;
6675 }
6676
6677 return ipr_reset_cmd_failed(ipr_cmd);
6678}
6679
6680/**
6681 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6682 * @ipr_cmd: ipr command struct
6683 *
6684 * This function send a mode sense to the IOA to retrieve
6685 * the IOA Advanced Function Control mode page.
6686 *
6687 * Return value:
6688 * IPR_RC_JOB_RETURN
6689 **/
6690static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6691{
6692 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6693
6694 ENTER;
6695 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6696 0x24, ioa_cfg->vpd_cbs_dma +
6697 offsetof(struct ipr_misc_cbs, mode_pages),
6698 sizeof(struct ipr_mode_pages));
6699
6700 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6701 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6702
6703 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6704
6705 LEAVE;
6706 return IPR_RC_JOB_RETURN;
6707}
6708
1da177e4
LT
6709/**
6710 * ipr_init_res_table - Initialize the resource table
6711 * @ipr_cmd: ipr command struct
6712 *
6713 * This function looks through the existing resource table, comparing
6714 * it with the config table. This function will take care of old/new
6715 * devices and schedule adding/removing them from the mid-layer
6716 * as appropriate.
6717 *
6718 * Return value:
6719 * IPR_RC_JOB_CONTINUE
6720 **/
6721static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6722{
6723 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6724 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
6725 struct ipr_config_table_entry_wrapper cfgtew;
6726 int entries, found, flag, i;
1da177e4
LT
6727 LIST_HEAD(old_res);
6728
6729 ENTER;
3e7ebdfa
WB
6730 if (ioa_cfg->sis64)
6731 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6732 else
6733 flag = ioa_cfg->u.cfg_table->hdr.flags;
6734
6735 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
6736 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6737
6738 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6739 list_move_tail(&res->queue, &old_res);
6740
3e7ebdfa 6741 if (ioa_cfg->sis64)
438b0331 6742 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
6743 else
6744 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6745
6746 for (i = 0; i < entries; i++) {
6747 if (ioa_cfg->sis64)
6748 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6749 else
6750 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
6751 found = 0;
6752
6753 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 6754 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
6755 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6756 found = 1;
6757 break;
6758 }
6759 }
6760
6761 if (!found) {
6762 if (list_empty(&ioa_cfg->free_res_q)) {
6763 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6764 break;
6765 }
6766
6767 found = 1;
6768 res = list_entry(ioa_cfg->free_res_q.next,
6769 struct ipr_resource_entry, queue);
6770 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 6771 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
6772 res->add_to_ml = 1;
6773 }
6774
6775 if (found)
3e7ebdfa 6776 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
6777 }
6778
6779 list_for_each_entry_safe(res, temp, &old_res, queue) {
6780 if (res->sdev) {
6781 res->del_from_ml = 1;
3e7ebdfa 6782 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 6783 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
6784 }
6785 }
6786
3e7ebdfa
WB
6787 list_for_each_entry_safe(res, temp, &old_res, queue) {
6788 ipr_clear_res_target(res);
6789 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6790 }
6791
ac09c349
BK
6792 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6793 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6794 else
6795 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
6796
6797 LEAVE;
6798 return IPR_RC_JOB_CONTINUE;
6799}
6800
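/*
 * The reconciliation is a set diff: every known resource is parked on
 * old_res, each config-table entry either reclaims its matching entry
 * or consumes a free one flagged add_to_ml, and whatever remains on
 * old_res is flagged del_from_ml (if the mid-layer still holds an
 * sdev) or returned to the free queue.
 */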
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
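	/*
	 * The config table size is encoded as a 24-bit big-endian
	 * length in CDB bytes 6-8. Illustrative example: a
	 * cfg_table_size of 0x012345 yields cdb[6] = 0x01,
	 * cdb[7] = 0x23, cdb[8] = 0x45.
	 */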
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page:	page code
 * @dma_addr:	DMA address of the inquiry response buffer
 * @xfer_len:	transfer length
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}

/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
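	/*
	 * The host RRQ bus address travels big-endian in the CDB: the
	 * low 32 bits in bytes 2-5 and, on SIS64 adapters, the high 32
	 * bits in bytes 10-13. Illustrative example: a host_rrq_dma of
	 * 0x0123456789abcdef yields cdb[2..5] = 89 ab cd ef and
	 * cdb[10..13] = 01 23 45 67.
	 */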
	if (ioa_cfg->sis64)
		ioarcb->cmd_pkt.cdb[1] = 0x1;
	ioarcb->cmd_pkt.cdb[2] =
		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u64) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	if (ioa_cfg->sis64) {
		ioarcb->cmd_pkt.cdb[10] =
			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
		ioarcb->cmd_pkt.cdb[11] =
			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
		ioarcb->cmd_pkt.cdb[12] =
			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
		ioarcb->cmd_pkt.cdb[13] =
			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
	}

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
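	/*
	 * The toggle bit flips each time the adapter wraps the
	 * circular host RRQ, so the interrupt handler can tell newly
	 * posted responses from stale entries.
	 */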
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}

/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
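		/*
		 * Build a 64-bit value with IPL stage change in the
		 * upper 32 bits and transition-to-operational in the
		 * lower 32, then mask both with a single writeq.
		 */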
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
		writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
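	/*
	 * pci_restore_state() is a no-op unless state_saved is set;
	 * config space was saved at probe time, so force the flag
	 * before restoring.
	 */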
	ioa_cfg->pdev->state_saved = true;
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	pci_block_user_cfg_access(pdev);
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ioa_cfg->reset;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ioa_cfg->reset;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

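	/*
	 * Each job step either completes synchronously and returns
	 * IPR_RC_JOB_CONTINUE, advancing this loop to the next step,
	 * or kicks off an asynchronous operation and returns
	 * IPR_RC_JOB_RETURN; its completion handler then re-enters
	 * this router to resume the state machine.
	 */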
	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->needs_warm_reset)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ioa_cfg->allow_cmds = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	pci_disable_msi(pdev);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 16, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
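		/*
		 * Each IOARCB carries the bus addresses of its own
		 * IOADL and IOASA, computed as fixed offsetof()
		 * displacements from the command block's base DMA
		 * address.
		 */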
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
	}
}

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t * __devinit
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
 * Description: Simply sets the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
 **/
static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
				  struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

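	/* Trigger a test interrupt so ipr_test_intr() can confirm MSI delivery */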
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}

1da177e4
LT
8426/**
8427 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8428 * @pdev: PCI device struct
8429 * @dev_id: PCI device id struct
8430 *
8431 * Return value:
8432 * 0 on success / non-zero on failure
8433 **/
8434static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8435 const struct pci_device_id *dev_id)
8436{
8437 struct ipr_ioa_cfg *ioa_cfg;
8438 struct Scsi_Host *host;
8439 unsigned long ipr_regs_pci;
8440 void __iomem *ipr_regs;
a2a65a3e 8441 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 8442 volatile u32 mask, uproc, interrupts;
1da177e4
LT
8443
8444 ENTER;
8445
8446 if ((rc = pci_enable_device(pdev))) {
8447 dev_err(&pdev->dev, "Cannot enable adapter\n");
8448 goto out;
8449 }
8450
8451 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8452
8453 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8454
8455 if (!host) {
8456 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8457 rc = -ENOMEM;
8458 goto out_disable;
8459 }
8460
8461 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8462 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
35a39691
BK
8463 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8464 sata_port_info.flags, &ipr_sata_ops);
1da177e4 8465
1be7bd82 8466 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 8467
1be7bd82 8468 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
8469 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8470 dev_id->vendor, dev_id->device);
8471 goto out_scsi_host_put;
8472 }
8473
a32c055f
WB
8474 /* set SIS 32 or SIS 64 */
8475 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82
WB
8476 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8477
5469cb5b
BK
8478 if (ipr_transop_timeout)
8479 ioa_cfg->transop_timeout = ipr_transop_timeout;
8480 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8481 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8482 else
8483 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8484
44c10138 8485 ioa_cfg->revid = pdev->revision;
463fc696 8486
1da177e4
LT
8487 ipr_regs_pci = pci_resource_start(pdev, 0);
8488
8489 rc = pci_request_regions(pdev, IPR_NAME);
8490 if (rc < 0) {
8491 dev_err(&pdev->dev,
8492 "Couldn't register memory range of registers\n");
8493 goto out_scsi_host_put;
8494 }
8495
25729a7f 8496 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
8497
8498 if (!ipr_regs) {
8499 dev_err(&pdev->dev,
8500 "Couldn't map memory range of registers\n");
8501 rc = -ENOMEM;
8502 goto out_release_regions;
8503 }
8504
8505 ioa_cfg->hdw_dma_regs = ipr_regs;
8506 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8507 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8508
8509 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8510
8511 pci_set_master(pdev);
8512
a32c055f
WB
8513 if (ioa_cfg->sis64) {
8514 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8515 if (rc < 0) {
8516 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8517 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8518 }
8519
8520 } else
8521 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8522
1da177e4
LT
8523 if (rc < 0) {
8524 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8525 goto cleanup_nomem;
8526 }
8527
8528 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8529 ioa_cfg->chip_cfg->cache_line_size);
8530
8531 if (rc != PCIBIOS_SUCCESSFUL) {
8532 dev_err(&pdev->dev, "Write of cache line size failed\n");
8533 rc = -EIO;
8534 goto cleanup_nomem;
8535 }
8536

	/* Enable MSI style interrupts if they are supported. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (rc)
			goto out_msi_disable;
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
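
	/*
	 * The block above fires a test interrupt before trusting MSI,
	 * since some adapters advertise MSI without delivering it
	 * reliably, and falls back to legacy INTx otherwise. On kernels
	 * with the pci_irq API (an assumption; this file predates it), a
	 * plain MSI-or-INTx request could be sketched as:
	 *
	 *	rc = pci_alloc_irq_vectors(pdev, 1, 1,
	 *				   PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	 *	if (rc < 0)
	 *		goto out_release_regions;
	 */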

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));
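
	/*
	 * The config table is one header followed by one entry per
	 * supported device, in either the SIS32 or SIS64 layout, so the
	 * buffer size is simply
	 * sizeof(header) + max_devs_supported * sizeof(entry).
	 */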

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}
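
	/*
	 * IRQF_SHARED is only needed on the legacy INTx path; a verified
	 * MSI vector is exclusive to this adapter, hence flags of 0 when
	 * msi_received is set. Teardown mirrors this with a free_irq()
	 * using the same ioa_cfg cookie.
	 */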

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	pci_disable_msi(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 *	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
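
/*
 * scsi_add_device() simply returns an error for a LUN that does not
 * exist, so the exhaustive (target, lun) sweep above can ignore its
 * return value; probing every pair is the price of having no LUN 0 to
 * anchor a REPORT LUNS style scan.
 */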

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
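
/*
 * A caller that must block until the adapter is actually down follows
 * the pattern used by __ipr_remove() and ipr_shutdown() below, roughly:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */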

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}
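
/*
 * Note the unwind style above: every failure after ipr_probe_ioa()
 * succeeds removes whatever has been registered so far and funnels
 * through __ipr_remove(), so a partially probed adapter never remains
 * bound.
 */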

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
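
/*
 * Each row matches vendor/device plus the IBM subsystem ID, with
 * driver_data carrying per-board quirk flags. Supporting a new board is
 * one more row; with a hypothetical subsystem ID macro it would look
 * like:
 *
 *	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
 *		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_NEW, 0, 0, 0 },
 */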

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
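
/*
 * This is deliberately fire and forget: ipr_halt_done() only recycles
 * the command block and nothing sleeps on completion, since the system
 * is already going down when the reboot notifier runs.
 */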

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);