]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/scsi/ipr.c
Linux 2.6.35-rc3
[mirror_ubuntu-zesty-kernel.git] / drivers / scsi / ipr.c
CommitLineData
1da177e4
LT
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
1da177e4
LT
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
5a0e3ad6 62#include <linux/slab.h>
1da177e4
LT
63#include <linux/ioport.h>
64#include <linux/delay.h>
65#include <linux/pci.h>
66#include <linux/wait.h>
67#include <linux/spinlock.h>
68#include <linux/sched.h>
69#include <linux/interrupt.h>
70#include <linux/blkdev.h>
71#include <linux/firmware.h>
72#include <linux/module.h>
73#include <linux/moduleparam.h>
35a39691 74#include <linux/libata.h>
0ce3a7e5 75#include <linux/hdreg.h>
f72919ec 76#include <linux/reboot.h>
3e7ebdfa 77#include <linux/stringify.h>
1da177e4
LT
78#include <asm/io.h>
79#include <asm/irq.h>
80#include <asm/processor.h>
81#include <scsi/scsi.h>
82#include <scsi/scsi_host.h>
83#include <scsi/scsi_tcq.h>
84#include <scsi/scsi_eh.h>
85#include <scsi/scsi_cmnd.h>
1da177e4
LT
86#include "ipr.h"
87
88/*
89 * Global Data
90 */
b7d68ca3 91static LIST_HEAD(ipr_ioa_head);
1da177e4
LT
92static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
93static unsigned int ipr_max_speed = 1;
94static int ipr_testmode = 0;
95static unsigned int ipr_fastfail = 0;
5469cb5b 96static unsigned int ipr_transop_timeout = 0;
d3c74871 97static unsigned int ipr_debug = 0;
3e7ebdfa 98static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
ac09c349 99static unsigned int ipr_dual_ioa_raid = 1;
1da177e4
LT
100static DEFINE_SPINLOCK(ipr_driver_lock);
101
102/* This table describes the differences between DMA controller chips */
103static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
60e7486b 104 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
1da177e4
LT
105 .mailbox = 0x0042C,
106 .cache_line_size = 0x20,
107 {
108 .set_interrupt_mask_reg = 0x0022C,
109 .clr_interrupt_mask_reg = 0x00230,
214777ba 110 .clr_interrupt_mask_reg32 = 0x00230,
1da177e4 111 .sense_interrupt_mask_reg = 0x0022C,
214777ba 112 .sense_interrupt_mask_reg32 = 0x0022C,
1da177e4 113 .clr_interrupt_reg = 0x00228,
214777ba 114 .clr_interrupt_reg32 = 0x00228,
1da177e4 115 .sense_interrupt_reg = 0x00224,
214777ba 116 .sense_interrupt_reg32 = 0x00224,
1da177e4
LT
117 .ioarrin_reg = 0x00404,
118 .sense_uproc_interrupt_reg = 0x00214,
214777ba 119 .sense_uproc_interrupt_reg32 = 0x00214,
1da177e4 120 .set_uproc_interrupt_reg = 0x00214,
214777ba
WB
121 .set_uproc_interrupt_reg32 = 0x00214,
122 .clr_uproc_interrupt_reg = 0x00218,
123 .clr_uproc_interrupt_reg32 = 0x00218
1da177e4
LT
124 }
125 },
126 { /* Snipe and Scamp */
127 .mailbox = 0x0052C,
128 .cache_line_size = 0x20,
129 {
130 .set_interrupt_mask_reg = 0x00288,
131 .clr_interrupt_mask_reg = 0x0028C,
214777ba 132 .clr_interrupt_mask_reg32 = 0x0028C,
1da177e4 133 .sense_interrupt_mask_reg = 0x00288,
214777ba 134 .sense_interrupt_mask_reg32 = 0x00288,
1da177e4 135 .clr_interrupt_reg = 0x00284,
214777ba 136 .clr_interrupt_reg32 = 0x00284,
1da177e4 137 .sense_interrupt_reg = 0x00280,
214777ba 138 .sense_interrupt_reg32 = 0x00280,
1da177e4
LT
139 .ioarrin_reg = 0x00504,
140 .sense_uproc_interrupt_reg = 0x00290,
214777ba 141 .sense_uproc_interrupt_reg32 = 0x00290,
1da177e4 142 .set_uproc_interrupt_reg = 0x00290,
214777ba
WB
143 .set_uproc_interrupt_reg32 = 0x00290,
144 .clr_uproc_interrupt_reg = 0x00294,
145 .clr_uproc_interrupt_reg32 = 0x00294
1da177e4
LT
146 }
147 },
a74c1639
WB
148 { /* CRoC */
149 .mailbox = 0x00040,
150 .cache_line_size = 0x20,
151 {
152 .set_interrupt_mask_reg = 0x00010,
153 .clr_interrupt_mask_reg = 0x00018,
214777ba 154 .clr_interrupt_mask_reg32 = 0x0001C,
a74c1639 155 .sense_interrupt_mask_reg = 0x00010,
214777ba 156 .sense_interrupt_mask_reg32 = 0x00014,
a74c1639 157 .clr_interrupt_reg = 0x00008,
214777ba 158 .clr_interrupt_reg32 = 0x0000C,
a74c1639 159 .sense_interrupt_reg = 0x00000,
214777ba 160 .sense_interrupt_reg32 = 0x00004,
a74c1639
WB
161 .ioarrin_reg = 0x00070,
162 .sense_uproc_interrupt_reg = 0x00020,
214777ba 163 .sense_uproc_interrupt_reg32 = 0x00024,
a74c1639 164 .set_uproc_interrupt_reg = 0x00020,
214777ba 165 .set_uproc_interrupt_reg32 = 0x00024,
dcbad00e 166 .clr_uproc_interrupt_reg = 0x00028,
214777ba
WB
167 .clr_uproc_interrupt_reg32 = 0x0002C,
168 .init_feedback_reg = 0x0005C,
dcbad00e
WB
169 .dump_addr_reg = 0x00064,
170 .dump_data_reg = 0x00068
a74c1639
WB
171 }
172 },
1da177e4
LT
173};
174
175static const struct ipr_chip_t ipr_chip[] = {
a32c055f
WB
176 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
177 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
178 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
181 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
d7b4627f
WB
182 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
184 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
1da177e4
LT
185};
186
187static int ipr_max_bus_speeds [] = {
188 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
189};
190
191MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
192MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
193module_param_named(max_speed, ipr_max_speed, uint, 0);
194MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
195module_param_named(log_level, ipr_log_level, uint, 0);
196MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
197module_param_named(testmode, ipr_testmode, int, 0);
198MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
2cf22be0 199module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
1da177e4
LT
200MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
201module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
202MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
2cf22be0 203module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
d3c74871 204MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
ac09c349
BK
205module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
206MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
3e7ebdfa
WB
207module_param_named(max_devs, ipr_max_devs, int, 0);
208MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
209 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
1da177e4
LT
210MODULE_LICENSE("GPL");
211MODULE_VERSION(IPR_DRIVER_VERSION);
212
1da177e4
LT
213/* A constant array of IOASCs/URCs/Error Messages */
214static const
215struct ipr_error_table_t ipr_error_table[] = {
933916f3 216 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
217 "8155: An unknown error was received"},
218 {0x00330000, 0, 0,
219 "Soft underlength error"},
220 {0x005A0000, 0, 0,
221 "Command to be cancelled not found"},
222 {0x00808000, 0, 0,
223 "Qualified success"},
933916f3 224 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 225 "FFFE: Soft device bus error recovered by the IOA"},
933916f3 226 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 227 "4101: Soft device bus fabric error"},
5aa3a333
WB
228 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
229 "FFFC: Logical block guard error recovered by the device"},
230 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
231 "FFFC: Logical block reference tag error recovered by the device"},
232 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
233 "4171: Recovered scatter list tag / sequence number error"},
234 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
235 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
236 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
237 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
238 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
239 "FFFD: Recovered logical block reference tag error detected by the IOA"},
240 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "FFFD: Logical block guard error recovered by the IOA"},
933916f3 242 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 243 "FFF9: Device sector reassign successful"},
933916f3 244 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 245 "FFF7: Media error recovered by device rewrite procedures"},
933916f3 246 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 247 "7001: IOA sector reassignment successful"},
933916f3 248 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 249 "FFF9: Soft media error. Sector reassignment recommended"},
933916f3 250 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 251 "FFF7: Media error recovered by IOA rewrite procedures"},
933916f3 252 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 253 "FF3D: Soft PCI bus error recovered by the IOA"},
933916f3 254 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 255 "FFF6: Device hardware error recovered by the IOA"},
933916f3 256 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 257 "FFF6: Device hardware error recovered by the device"},
933916f3 258 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 259 "FF3D: Soft IOA error recovered by the IOA"},
933916f3 260 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 261 "FFFA: Undefined device response recovered by the IOA"},
933916f3 262 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 263 "FFF6: Device bus error, message or command phase"},
933916f3 264 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
35a39691 265 "FFFE: Task Management Function failed"},
933916f3 266 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 267 "FFF6: Failure prediction threshold exceeded"},
933916f3 268 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
269 "8009: Impending cache battery pack failure"},
270 {0x02040400, 0, 0,
271 "34FF: Disk device format in progress"},
65f56475
BK
272 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
273 "9070: IOA requested reset"},
1da177e4
LT
274 {0x023F0000, 0, 0,
275 "Synchronization required"},
276 {0x024E0000, 0, 0,
277 "No ready, IOA shutdown"},
278 {0x025A0000, 0, 0,
279 "Not ready, IOA has been shutdown"},
933916f3 280 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
281 "3020: Storage subsystem configuration error"},
282 {0x03110B00, 0, 0,
283 "FFF5: Medium error, data unreadable, recommend reassign"},
284 {0x03110C00, 0, 0,
285 "7000: Medium error, data unreadable, do not reassign"},
933916f3 286 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 287 "FFF3: Disk media format bad"},
933916f3 288 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 289 "3002: Addressed device failed to respond to selection"},
933916f3 290 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 291 "3100: Device bus error"},
933916f3 292 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
293 "3109: IOA timed out a device command"},
294 {0x04088000, 0, 0,
295 "3120: SCSI bus is not operational"},
933916f3 296 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 297 "4100: Hard device bus fabric error"},
5aa3a333
WB
298 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
299 "310C: Logical block guard error detected by the device"},
300 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
301 "310C: Logical block reference tag error detected by the device"},
302 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
303 "4170: Scatter list tag / sequence number error"},
304 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
305 "8150: Logical block CRC error on IOA to Host transfer"},
306 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
307 "4170: Logical block sequence number error on IOA to Host transfer"},
308 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
309 "310D: Logical block reference tag error detected by the IOA"},
310 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
311 "310D: Logical block guard error detected by the IOA"},
933916f3 312 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 313 "9000: IOA reserved area data check"},
933916f3 314 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 315 "9001: IOA reserved area invalid data pattern"},
933916f3 316 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 317 "9002: IOA reserved area LRC error"},
5aa3a333
WB
318 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
319 "Hardware Error, IOA metadata access error"},
933916f3 320 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 321 "102E: Out of alternate sectors for disk storage"},
933916f3 322 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 323 "FFF4: Data transfer underlength error"},
933916f3 324 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 325 "FFF4: Data transfer overlength error"},
933916f3 326 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 327 "3400: Logical unit failure"},
933916f3 328 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 329 "FFF4: Device microcode is corrupt"},
933916f3 330 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
331 "8150: PCI bus error"},
332 {0x04430000, 1, 0,
333 "Unsupported device bus message received"},
933916f3 334 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 335 "FFF4: Disk device problem"},
933916f3 336 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 337 "8150: Permanent IOA failure"},
933916f3 338 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 339 "3010: Disk device returned wrong response to IOA"},
933916f3 340 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
341 "8151: IOA microcode error"},
342 {0x04448500, 0, 0,
343 "Device bus status error"},
933916f3 344 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 345 "8157: IOA error requiring IOA reset to recover"},
35a39691
BK
346 {0x04448700, 0, 0,
347 "ATA device status error"},
1da177e4
LT
348 {0x04490000, 0, 0,
349 "Message reject received from the device"},
933916f3 350 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 351 "8008: A permanent cache battery pack failure occurred"},
933916f3 352 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 353 "9090: Disk unit has been modified after the last known status"},
933916f3 354 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 355 "9081: IOA detected device error"},
933916f3 356 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 357 "9082: IOA detected device error"},
933916f3 358 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 359 "3110: Device bus error, message or command phase"},
933916f3 360 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
35a39691 361 "3110: SAS Command / Task Management Function failed"},
933916f3 362 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 363 "9091: Incorrect hardware configuration change has been detected"},
933916f3 364 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 365 "9073: Invalid multi-adapter configuration"},
933916f3 366 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 367 "4010: Incorrect connection between cascaded expanders"},
933916f3 368 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 369 "4020: Connections exceed IOA design limits"},
933916f3 370 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 371 "4030: Incorrect multipath connection"},
933916f3 372 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 373 "4110: Unsupported enclosure function"},
933916f3 374 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
375 "FFF4: Command to logical unit failed"},
376 {0x05240000, 1, 0,
377 "Illegal request, invalid request type or request packet"},
378 {0x05250000, 0, 0,
379 "Illegal request, invalid resource handle"},
b0df54bb
BK
380 {0x05258000, 0, 0,
381 "Illegal request, commands not allowed to this device"},
382 {0x05258100, 0, 0,
383 "Illegal request, command not allowed to a secondary adapter"},
5aa3a333
WB
384 {0x05258200, 0, 0,
385 "Illegal request, command not allowed to a non-optimized resource"},
1da177e4
LT
386 {0x05260000, 0, 0,
387 "Illegal request, invalid field in parameter list"},
388 {0x05260100, 0, 0,
389 "Illegal request, parameter not supported"},
390 {0x05260200, 0, 0,
391 "Illegal request, parameter value invalid"},
392 {0x052C0000, 0, 0,
393 "Illegal request, command sequence error"},
b0df54bb
BK
394 {0x052C8000, 1, 0,
395 "Illegal request, dual adapter support not enabled"},
933916f3 396 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 397 "9031: Array protection temporarily suspended, protection resuming"},
933916f3 398 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 399 "9040: Array protection temporarily suspended, protection resuming"},
933916f3 400 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 401 "3140: Device bus not ready to ready transition"},
933916f3 402 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
403 "FFFB: SCSI bus was reset"},
404 {0x06290500, 0, 0,
405 "FFFE: SCSI bus transition to single ended"},
406 {0x06290600, 0, 0,
407 "FFFE: SCSI bus transition to LVD"},
933916f3 408 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 409 "FFFB: SCSI bus was reset by another initiator"},
933916f3 410 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 411 "3029: A device replacement has occurred"},
933916f3 412 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 413 "9051: IOA cache data exists for a missing or failed device"},
933916f3 414 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 415 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
933916f3 416 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 417 "9025: Disk unit is not supported at its physical location"},
933916f3 418 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 419 "3020: IOA detected a SCSI bus configuration error"},
933916f3 420 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 421 "3150: SCSI bus configuration error"},
933916f3 422 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 423 "9074: Asymmetric advanced function disk configuration"},
933916f3 424 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 425 "4040: Incomplete multipath connection between IOA and enclosure"},
933916f3 426 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 427 "4041: Incomplete multipath connection between enclosure and device"},
933916f3 428 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 429 "9075: Incomplete multipath connection between IOA and remote IOA"},
933916f3 430 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 431 "9076: Configuration error, missing remote IOA"},
933916f3 432 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 433 "4050: Enclosure does not support a required multipath function"},
b75424fc
WB
434 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "4070: Logically bad block written on device"},
933916f3 436 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 437 "9041: Array protection temporarily suspended"},
933916f3 438 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 439 "9042: Corrupt array parity detected on specified device"},
933916f3 440 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 441 "9030: Array no longer protected due to missing or failed disk unit"},
933916f3 442 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 443 "9071: Link operational transition"},
933916f3 444 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 445 "9072: Link not operational transition"},
933916f3 446 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 447 "9032: Array exposed but still protected"},
e435340c
BK
448 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
449 "70DD: Device forced failed by disrupt device command"},
933916f3 450 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 451 "4061: Multipath redundancy level got better"},
933916f3 452 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 453 "4060: Multipath redundancy level got worse"},
1da177e4
LT
454 {0x07270000, 0, 0,
455 "Failure due to other device"},
933916f3 456 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 457 "9008: IOA does not support functions expected by devices"},
933916f3 458 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 459 "9010: Cache data associated with attached devices cannot be found"},
933916f3 460 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 461 "9011: Cache data belongs to devices other than those attached"},
933916f3 462 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 463 "9020: Array missing 2 or more devices with only 1 device present"},
933916f3 464 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 465 "9021: Array missing 2 or more devices with 2 or more devices present"},
933916f3 466 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 467 "9022: Exposed array is missing a required device"},
933916f3 468 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 469 "9023: Array member(s) not at required physical locations"},
933916f3 470 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 471 "9024: Array not functional due to present hardware configuration"},
933916f3 472 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 473 "9026: Array not functional due to present hardware configuration"},
933916f3 474 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 475 "9027: Array is missing a device and parity is out of sync"},
933916f3 476 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 477 "9028: Maximum number of arrays already exist"},
933916f3 478 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 479 "9050: Required cache data cannot be located for a disk unit"},
933916f3 480 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 481 "9052: Cache data exists for a device that has been modified"},
933916f3 482 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 483 "9054: IOA resources not available due to previous problems"},
933916f3 484 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 485 "9092: Disk unit requires initialization before use"},
933916f3 486 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 487 "9029: Incorrect hardware configuration change has been detected"},
933916f3 488 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 489 "9060: One or more disk pairs are missing from an array"},
933916f3 490 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 491 "9061: One or more disks are missing from an array"},
933916f3 492 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 493 "9062: One or more disks are missing from an array"},
933916f3 494 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
495 "9063: Maximum number of functional arrays has been exceeded"},
496 {0x0B260000, 0, 0,
497 "Aborted command, invalid descriptor"},
498 {0x0B5A0000, 0, 0,
499 "Command terminated by host"}
500};
501
502static const struct ipr_ses_table_entry ipr_ses_table[] = {
503 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
504 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
505 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
506 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
507 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
508 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
509 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
510 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
511 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
512 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
513 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
514 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
515 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
516};
517
518/*
519 * Function Prototypes
520 */
521static int ipr_reset_alert(struct ipr_cmnd *);
522static void ipr_process_ccn(struct ipr_cmnd *);
523static void ipr_process_error(struct ipr_cmnd *);
524static void ipr_reset_ioa_job(struct ipr_cmnd *);
525static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
526 enum ipr_shutdown_type);
527
528#ifdef CONFIG_SCSI_IPR_TRACE
529/**
530 * ipr_trc_hook - Add a trace entry to the driver trace
531 * @ipr_cmd: ipr command struct
532 * @type: trace type
533 * @add_data: additional data
534 *
535 * Return value:
536 * none
537 **/
538static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
539 u8 type, u32 add_data)
540{
541 struct ipr_trace_entry *trace_entry;
542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
543
544 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
545 trace_entry->time = jiffies;
546 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
547 trace_entry->type = type;
a32c055f
WB
548 if (ipr_cmd->ioa_cfg->sis64)
549 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
550 else
551 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
35a39691 552 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
1da177e4
LT
553 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
554 trace_entry->u.add_data = add_data;
555}
556#else
557#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
558#endif
559
560/**
561 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
562 * @ipr_cmd: ipr command struct
563 *
564 * Return value:
565 * none
566 **/
567static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
568{
569 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00
WB
570 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
571 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
a32c055f 572 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
573
574 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 575 ioarcb->data_transfer_length = 0;
1da177e4 576 ioarcb->read_data_transfer_length = 0;
a32c055f 577 ioarcb->ioadl_len = 0;
1da177e4 578 ioarcb->read_ioadl_len = 0;
a32c055f 579
96d21f00 580 if (ipr_cmd->ioa_cfg->sis64) {
a32c055f
WB
581 ioarcb->u.sis64_addr_data.data_ioadl_addr =
582 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
96d21f00
WB
583 ioasa64->u.gata.status = 0;
584 } else {
a32c055f
WB
585 ioarcb->write_ioadl_addr =
586 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
587 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
96d21f00 588 ioasa->u.gata.status = 0;
a32c055f
WB
589 }
590
96d21f00
WB
591 ioasa->hdr.ioasc = 0;
592 ioasa->hdr.residual_data_len = 0;
1da177e4 593 ipr_cmd->scsi_cmd = NULL;
35a39691 594 ipr_cmd->qc = NULL;
1da177e4
LT
595 ipr_cmd->sense_buffer[0] = 0;
596 ipr_cmd->dma_use_sg = 0;
597}
598
599/**
600 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
601 * @ipr_cmd: ipr command struct
602 *
603 * Return value:
604 * none
605 **/
606static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
607{
608 ipr_reinit_ipr_cmnd(ipr_cmd);
609 ipr_cmd->u.scratch = 0;
610 ipr_cmd->sibling = NULL;
611 init_timer(&ipr_cmd->timer);
612}
613
614/**
615 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
616 * @ioa_cfg: ioa config struct
617 *
618 * Return value:
619 * pointer to ipr command struct
620 **/
621static
622struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
623{
624 struct ipr_cmnd *ipr_cmd;
625
626 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
627 list_del(&ipr_cmd->queue);
628 ipr_init_ipr_cmnd(ipr_cmd);
629
630 return ipr_cmd;
631}
632
1da177e4
LT
633/**
634 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
635 * @ioa_cfg: ioa config struct
636 * @clr_ints: interrupts to clear
637 *
638 * This function masks all interrupts on the adapter, then clears the
639 * interrupts specified in the mask
640 *
641 * Return value:
642 * none
643 **/
644static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
645 u32 clr_ints)
646{
647 volatile u32 int_reg;
648
649 /* Stop new interrupts */
650 ioa_cfg->allow_interrupts = 0;
651
652 /* Set interrupt mask to stop all new interrupts */
214777ba
WB
653 if (ioa_cfg->sis64)
654 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
655 else
656 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
1da177e4
LT
657
658 /* Clear any pending interrupts */
214777ba
WB
659 if (ioa_cfg->sis64)
660 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
661 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
1da177e4
LT
662 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
663}
664
665/**
666 * ipr_save_pcix_cmd_reg - Save PCI-X command register
667 * @ioa_cfg: ioa config struct
668 *
669 * Return value:
670 * 0 on success / -EIO on failure
671 **/
672static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
673{
674 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
675
7dce0e1c
BK
676 if (pcix_cmd_reg == 0)
677 return 0;
1da177e4
LT
678
679 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
680 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
681 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
682 return -EIO;
683 }
684
685 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
686 return 0;
687}
688
689/**
690 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
691 * @ioa_cfg: ioa config struct
692 *
693 * Return value:
694 * 0 on success / -EIO on failure
695 **/
696static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
697{
698 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
699
700 if (pcix_cmd_reg) {
701 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
702 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
703 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
704 return -EIO;
705 }
1da177e4
LT
706 }
707
708 return 0;
709}
710
35a39691
BK
711/**
712 * ipr_sata_eh_done - done function for aborted SATA commands
713 * @ipr_cmd: ipr command struct
714 *
715 * This function is invoked for ops generated to SATA
716 * devices which are being aborted.
717 *
718 * Return value:
719 * none
720 **/
721static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
722{
723 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
724 struct ata_queued_cmd *qc = ipr_cmd->qc;
725 struct ipr_sata_port *sata_port = qc->ap->private_data;
726
727 qc->err_mask |= AC_ERR_OTHER;
728 sata_port->ioasa.status |= ATA_BUSY;
729 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
730 ata_qc_complete(qc);
731}
732
1da177e4
LT
733/**
734 * ipr_scsi_eh_done - mid-layer done function for aborted ops
735 * @ipr_cmd: ipr command struct
736 *
737 * This function is invoked by the interrupt handler for
738 * ops generated by the SCSI mid-layer which are being aborted.
739 *
740 * Return value:
741 * none
742 **/
743static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
744{
745 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
746 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
747
748 scsi_cmd->result |= (DID_ERROR << 16);
749
63015bc9 750 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
751 scsi_cmd->scsi_done(scsi_cmd);
752 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
753}
754
755/**
756 * ipr_fail_all_ops - Fails all outstanding ops.
757 * @ioa_cfg: ioa config struct
758 *
759 * This function fails all outstanding ops.
760 *
761 * Return value:
762 * none
763 **/
764static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
765{
766 struct ipr_cmnd *ipr_cmd, *temp;
767
768 ENTER;
769 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
770 list_del(&ipr_cmd->queue);
771
96d21f00
WB
772 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
773 ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
1da177e4
LT
774
775 if (ipr_cmd->scsi_cmd)
776 ipr_cmd->done = ipr_scsi_eh_done;
35a39691
BK
777 else if (ipr_cmd->qc)
778 ipr_cmd->done = ipr_sata_eh_done;
1da177e4
LT
779
780 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
781 del_timer(&ipr_cmd->timer);
782 ipr_cmd->done(ipr_cmd);
783 }
784
785 LEAVE;
786}
787
a32c055f
WB
788/**
789 * ipr_send_command - Send driver initiated requests.
790 * @ipr_cmd: ipr command struct
791 *
792 * This function sends a command to the adapter using the correct write call.
793 * In the case of sis64, calculate the ioarcb size required. Then or in the
794 * appropriate bits.
795 *
796 * Return value:
797 * none
798 **/
799static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
800{
801 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
802 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
803
804 if (ioa_cfg->sis64) {
805 /* The default size is 256 bytes */
806 send_dma_addr |= 0x1;
807
808 /* If the number of ioadls * size of ioadl > 128 bytes,
809 then use a 512 byte ioarcb */
810 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
811 send_dma_addr |= 0x4;
812 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
813 } else
814 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
815}
816
1da177e4
LT
817/**
818 * ipr_do_req - Send driver initiated requests.
819 * @ipr_cmd: ipr command struct
820 * @done: done function
821 * @timeout_func: timeout function
822 * @timeout: timeout value
823 *
824 * This function sends the specified command to the adapter with the
825 * timeout given. The done function is invoked on command completion.
826 *
827 * Return value:
828 * none
829 **/
830static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
831 void (*done) (struct ipr_cmnd *),
832 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
833{
834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
835
836 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
837
838 ipr_cmd->done = done;
839
840 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
841 ipr_cmd->timer.expires = jiffies + timeout;
842 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
843
844 add_timer(&ipr_cmd->timer);
845
846 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
847
848 mb();
a32c055f
WB
849
850 ipr_send_command(ipr_cmd);
1da177e4
LT
851}
852
853/**
854 * ipr_internal_cmd_done - Op done function for an internally generated op.
855 * @ipr_cmd: ipr command struct
856 *
857 * This function is the op done function for an internally generated,
858 * blocking op. It simply wakes the sleeping thread.
859 *
860 * Return value:
861 * none
862 **/
863static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
864{
865 if (ipr_cmd->sibling)
866 ipr_cmd->sibling = NULL;
867 else
868 complete(&ipr_cmd->completion);
869}
870
a32c055f
WB
871/**
872 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
873 * @ipr_cmd: ipr command struct
874 * @dma_addr: dma address
875 * @len: transfer length
876 * @flags: ioadl flag value
877 *
878 * This function initializes an ioadl in the case where there is only a single
879 * descriptor.
880 *
881 * Return value:
882 * nothing
883 **/
884static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
885 u32 len, int flags)
886{
887 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
888 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
889
890 ipr_cmd->dma_use_sg = 1;
891
892 if (ipr_cmd->ioa_cfg->sis64) {
893 ioadl64->flags = cpu_to_be32(flags);
894 ioadl64->data_len = cpu_to_be32(len);
895 ioadl64->address = cpu_to_be64(dma_addr);
896
897 ipr_cmd->ioarcb.ioadl_len =
898 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
899 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
900 } else {
901 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
902 ioadl->address = cpu_to_be32(dma_addr);
903
904 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
905 ipr_cmd->ioarcb.read_ioadl_len =
906 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
907 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
908 } else {
909 ipr_cmd->ioarcb.ioadl_len =
910 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
911 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
912 }
913 }
914}
915
1da177e4
LT
916/**
917 * ipr_send_blocking_cmd - Send command and sleep on its completion.
918 * @ipr_cmd: ipr command struct
919 * @timeout_func: function to invoke if command times out
920 * @timeout: timeout
921 *
922 * Return value:
923 * none
924 **/
925static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
926 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
927 u32 timeout)
928{
929 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
930
931 init_completion(&ipr_cmd->completion);
932 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
933
934 spin_unlock_irq(ioa_cfg->host->host_lock);
935 wait_for_completion(&ipr_cmd->completion);
936 spin_lock_irq(ioa_cfg->host->host_lock);
937}
938
939/**
940 * ipr_send_hcam - Send an HCAM to the adapter.
941 * @ioa_cfg: ioa config struct
942 * @type: HCAM type
943 * @hostrcb: hostrcb struct
944 *
945 * This function will send a Host Controlled Async command to the adapter.
946 * If HCAMs are currently not allowed to be issued to the adapter, it will
947 * place the hostrcb on the free queue.
948 *
949 * Return value:
950 * none
951 **/
952static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
953 struct ipr_hostrcb *hostrcb)
954{
955 struct ipr_cmnd *ipr_cmd;
956 struct ipr_ioarcb *ioarcb;
957
958 if (ioa_cfg->allow_cmds) {
959 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
960 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
961 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
962
963 ipr_cmd->u.hostrcb = hostrcb;
964 ioarcb = &ipr_cmd->ioarcb;
965
966 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
967 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
968 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
969 ioarcb->cmd_pkt.cdb[1] = type;
970 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
971 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
972
a32c055f
WB
973 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
974 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
975
976 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
977 ipr_cmd->done = ipr_process_ccn;
978 else
979 ipr_cmd->done = ipr_process_error;
980
981 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
982
983 mb();
a32c055f
WB
984
985 ipr_send_command(ipr_cmd);
1da177e4
LT
986 } else {
987 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
988 }
989}
990
3e7ebdfa
WB
991/**
992 * ipr_update_ata_class - Update the ata class in the resource entry
993 * @res: resource entry struct
994 * @proto: cfgte device bus protocol value
995 *
996 * Return value:
997 * none
998 **/
999static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1000{
1001 switch(proto) {
1002 case IPR_PROTO_SATA:
1003 case IPR_PROTO_SAS_STP:
1004 res->ata_class = ATA_DEV_ATA;
1005 break;
1006 case IPR_PROTO_SATA_ATAPI:
1007 case IPR_PROTO_SAS_STP_ATAPI:
1008 res->ata_class = ATA_DEV_ATAPI;
1009 break;
1010 default:
1011 res->ata_class = ATA_DEV_UNKNOWN;
1012 break;
1013 };
1014}
1015
1da177e4
LT
1016/**
1017 * ipr_init_res_entry - Initialize a resource entry struct.
1018 * @res: resource entry struct
3e7ebdfa 1019 * @cfgtew: config table entry wrapper struct
1da177e4
LT
1020 *
1021 * Return value:
1022 * none
1023 **/
3e7ebdfa
WB
1024static void ipr_init_res_entry(struct ipr_resource_entry *res,
1025 struct ipr_config_table_entry_wrapper *cfgtew)
1da177e4 1026{
3e7ebdfa
WB
1027 int found = 0;
1028 unsigned int proto;
1029 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1030 struct ipr_resource_entry *gscsi_res = NULL;
1031
ee0a90fa 1032 res->needs_sync_complete = 0;
1da177e4
LT
1033 res->in_erp = 0;
1034 res->add_to_ml = 0;
1035 res->del_from_ml = 0;
1036 res->resetting_device = 0;
1037 res->sdev = NULL;
35a39691 1038 res->sata_port = NULL;
3e7ebdfa
WB
1039
1040 if (ioa_cfg->sis64) {
1041 proto = cfgtew->u.cfgte64->proto;
1042 res->res_flags = cfgtew->u.cfgte64->res_flags;
1043 res->qmodel = IPR_QUEUEING_MODEL64(res);
438b0331 1044 res->type = cfgtew->u.cfgte64->res_type;
3e7ebdfa
WB
1045
1046 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1047 sizeof(res->res_path));
1048
1049 res->bus = 0;
1050 res->lun = scsilun_to_int(&res->dev_lun);
1051
1052 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1053 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1054 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1055 found = 1;
1056 res->target = gscsi_res->target;
1057 break;
1058 }
1059 }
1060 if (!found) {
1061 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1062 ioa_cfg->max_devs_supported);
1063 set_bit(res->target, ioa_cfg->target_ids);
1064 }
1065
1066 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1067 sizeof(res->dev_lun.scsi_lun));
1068 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1069 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1070 res->target = 0;
1071 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1072 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1073 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1074 ioa_cfg->max_devs_supported);
1075 set_bit(res->target, ioa_cfg->array_ids);
1076 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1077 res->bus = IPR_VSET_VIRTUAL_BUS;
1078 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1079 ioa_cfg->max_devs_supported);
1080 set_bit(res->target, ioa_cfg->vset_ids);
1081 } else {
1082 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1083 ioa_cfg->max_devs_supported);
1084 set_bit(res->target, ioa_cfg->target_ids);
1085 }
1086 } else {
1087 proto = cfgtew->u.cfgte->proto;
1088 res->qmodel = IPR_QUEUEING_MODEL(res);
1089 res->flags = cfgtew->u.cfgte->flags;
1090 if (res->flags & IPR_IS_IOA_RESOURCE)
1091 res->type = IPR_RES_TYPE_IOAFP;
1092 else
1093 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1094
1095 res->bus = cfgtew->u.cfgte->res_addr.bus;
1096 res->target = cfgtew->u.cfgte->res_addr.target;
1097 res->lun = cfgtew->u.cfgte->res_addr.lun;
1098 }
1099
1100 ipr_update_ata_class(res, proto);
1101}
1102
1103/**
1104 * ipr_is_same_device - Determine if two devices are the same.
1105 * @res: resource entry struct
1106 * @cfgtew: config table entry wrapper struct
1107 *
1108 * Return value:
1109 * 1 if the devices are the same / 0 otherwise
1110 **/
1111static int ipr_is_same_device(struct ipr_resource_entry *res,
1112 struct ipr_config_table_entry_wrapper *cfgtew)
1113{
1114 if (res->ioa_cfg->sis64) {
1115 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1116 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1117 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1118 sizeof(cfgtew->u.cfgte64->lun))) {
1119 return 1;
1120 }
1121 } else {
1122 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1123 res->target == cfgtew->u.cfgte->res_addr.target &&
1124 res->lun == cfgtew->u.cfgte->res_addr.lun)
1125 return 1;
1126 }
1127
1128 return 0;
1129}
1130
1131/**
1132 * ipr_format_resource_path - Format the resource path for printing.
1133 * @res_path: resource path
1134 * @buf: buffer
1135 *
1136 * Return value:
1137 * pointer to buffer
1138 **/
1139static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1140{
1141 int i;
1142
1143 sprintf(buffer, "%02X", res_path[0]);
1144 for (i=1; res_path[i] != 0xff; i++)
4565e370 1145 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
3e7ebdfa
WB
1146
1147 return buffer;
1148}
1149
1150/**
1151 * ipr_update_res_entry - Update the resource entry.
1152 * @res: resource entry struct
1153 * @cfgtew: config table entry wrapper struct
1154 *
1155 * Return value:
1156 * none
1157 **/
1158static void ipr_update_res_entry(struct ipr_resource_entry *res,
1159 struct ipr_config_table_entry_wrapper *cfgtew)
1160{
1161 char buffer[IPR_MAX_RES_PATH_LENGTH];
1162 unsigned int proto;
1163 int new_path = 0;
1164
1165 if (res->ioa_cfg->sis64) {
1166 res->flags = cfgtew->u.cfgte64->flags;
1167 res->res_flags = cfgtew->u.cfgte64->res_flags;
1168 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1169
1170 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1171 sizeof(struct ipr_std_inq_data));
1172
1173 res->qmodel = IPR_QUEUEING_MODEL64(res);
1174 proto = cfgtew->u.cfgte64->proto;
1175 res->res_handle = cfgtew->u.cfgte64->res_handle;
1176 res->dev_id = cfgtew->u.cfgte64->dev_id;
1177
1178 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1179 sizeof(res->dev_lun.scsi_lun));
1180
1181 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1182 sizeof(res->res_path))) {
1183 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1184 sizeof(res->res_path));
1185 new_path = 1;
1186 }
1187
1188 if (res->sdev && new_path)
1189 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1190 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1191 } else {
1192 res->flags = cfgtew->u.cfgte->flags;
1193 if (res->flags & IPR_IS_IOA_RESOURCE)
1194 res->type = IPR_RES_TYPE_IOAFP;
1195 else
1196 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1197
1198 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1199 sizeof(struct ipr_std_inq_data));
1200
1201 res->qmodel = IPR_QUEUEING_MODEL(res);
1202 proto = cfgtew->u.cfgte->proto;
1203 res->res_handle = cfgtew->u.cfgte->res_handle;
1204 }
1205
1206 ipr_update_ata_class(res, proto);
1207}
1208
1209/**
1210 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1211 * for the resource.
1212 * @res: resource entry struct
1213 * @cfgtew: config table entry wrapper struct
1214 *
1215 * Return value:
1216 * none
1217 **/
1218static void ipr_clear_res_target(struct ipr_resource_entry *res)
1219{
1220 struct ipr_resource_entry *gscsi_res = NULL;
1221 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1222
1223 if (!ioa_cfg->sis64)
1224 return;
1225
1226 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1227 clear_bit(res->target, ioa_cfg->array_ids);
1228 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1229 clear_bit(res->target, ioa_cfg->vset_ids);
1230 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1231 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1232 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1233 return;
1234 clear_bit(res->target, ioa_cfg->target_ids);
1235
1236 } else if (res->bus == 0)
1237 clear_bit(res->target, ioa_cfg->target_ids);
1da177e4
LT
1238}
1239
1240/**
1241 * ipr_handle_config_change - Handle a config change from the adapter
1242 * @ioa_cfg: ioa config struct
1243 * @hostrcb: hostrcb
1244 *
1245 * Return value:
1246 * none
1247 **/
1248static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1249 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1250{
1251 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1252 struct ipr_config_table_entry_wrapper cfgtew;
1253 __be32 cc_res_handle;
1254
1da177e4
LT
1255 u32 is_ndn = 1;
1256
3e7ebdfa
WB
1257 if (ioa_cfg->sis64) {
1258 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1259 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1260 } else {
1261 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1262 cc_res_handle = cfgtew.u.cfgte->res_handle;
1263 }
1da177e4
LT
1264
1265 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1266 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1267 is_ndn = 0;
1268 break;
1269 }
1270 }
1271
1272 if (is_ndn) {
1273 if (list_empty(&ioa_cfg->free_res_q)) {
1274 ipr_send_hcam(ioa_cfg,
1275 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1276 hostrcb);
1277 return;
1278 }
1279
1280 res = list_entry(ioa_cfg->free_res_q.next,
1281 struct ipr_resource_entry, queue);
1282
1283 list_del(&res->queue);
3e7ebdfa 1284 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1285 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1286 }
1287
3e7ebdfa 1288 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1289
1290 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1291 if (res->sdev) {
1da177e4 1292 res->del_from_ml = 1;
3e7ebdfa 1293 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4
LT
1294 if (ioa_cfg->allow_ml_add_del)
1295 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1296 } else {
1297 ipr_clear_res_target(res);
1da177e4 1298 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1299 }
1da177e4
LT
1300 } else if (!res->sdev) {
1301 res->add_to_ml = 1;
1302 if (ioa_cfg->allow_ml_add_del)
1303 schedule_work(&ioa_cfg->work_q);
1304 }
1305
1306 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1307}
1308
1309/**
1310 * ipr_process_ccn - Op done function for a CCN.
1311 * @ipr_cmd: ipr command struct
1312 *
1313 * This function is the op done function for a configuration
1314 * change notification host controlled async from the adapter.
1315 *
1316 * Return value:
1317 * none
1318 **/
1319static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1320{
1321 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1322 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1323 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
1324
1325 list_del(&hostrcb->queue);
1326 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1327
1328 if (ioasc) {
1329 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1330 dev_err(&ioa_cfg->pdev->dev,
1331 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1332
1333 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1334 } else {
1335 ipr_handle_config_change(ioa_cfg, hostrcb);
1336 }
1337}
1338
8cf093e2
BK
1339/**
1340 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1341 * @i: index into buffer
1342 * @buf: string to modify
1343 *
1344 * This function will strip all trailing whitespace, pad the end
1345 * of the string with a single space, and NULL terminate the string.
1346 *
1347 * Return value:
1348 * new length of string
1349 **/
1350static int strip_and_pad_whitespace(int i, char *buf)
1351{
1352 while (i && buf[i] == ' ')
1353 i--;
1354 buf[i+1] = ' ';
1355 buf[i+2] = '\0';
1356 return i + 2;
1357}
1358
1359/**
1360 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1361 * @prefix: string to print at start of printk
1362 * @hostrcb: hostrcb pointer
1363 * @vpd: vendor/product id/sn struct
1364 *
1365 * Return value:
1366 * none
1367 **/
1368static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1369 struct ipr_vpd *vpd)
1370{
1371 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1372 int i = 0;
1373
1374 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1375 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1376
1377 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1378 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1379
1380 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1381 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1382
1383 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1384}
1385
1da177e4
LT
1386/**
1387 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1388 * @vpd: vendor/product id/sn struct
1da177e4
LT
1389 *
1390 * Return value:
1391 * none
1392 **/
cfc32139 1393static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1394{
1395 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1396 + IPR_SERIAL_NUM_LEN];
1397
cfc32139
BK
1398 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1399 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1400 IPR_PROD_ID_LEN);
1401 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1402 ipr_err("Vendor/Product ID: %s\n", buffer);
1403
cfc32139 1404 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1405 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1406 ipr_err(" Serial Number: %s\n", buffer);
1407}
1408
8cf093e2
BK
1409/**
1410 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1411 * @prefix: string to print at start of printk
1412 * @hostrcb: hostrcb pointer
1413 * @vpd: vendor/product id/sn/wwn struct
1414 *
1415 * Return value:
1416 * none
1417 **/
1418static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1419 struct ipr_ext_vpd *vpd)
1420{
1421 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1422 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1423 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1424}
1425
ee0f05b8
BK
1426/**
1427 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1428 * @vpd: vendor/product id/sn/wwn struct
1429 *
1430 * Return value:
1431 * none
1432 **/
1433static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1434{
1435 ipr_log_vpd(&vpd->vpd);
1436 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1437 be32_to_cpu(vpd->wwid[1]));
1438}
1439
1440/**
1441 * ipr_log_enhanced_cache_error - Log a cache error.
1442 * @ioa_cfg: ioa config struct
1443 * @hostrcb: hostrcb struct
1444 *
1445 * Return value:
1446 * none
1447 **/
1448static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1449 struct ipr_hostrcb *hostrcb)
1450{
4565e370
WB
1451 struct ipr_hostrcb_type_12_error *error;
1452
1453 if (ioa_cfg->sis64)
1454 error = &hostrcb->hcam.u.error64.u.type_12_error;
1455 else
1456 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8
BK
1457
1458 ipr_err("-----Current Configuration-----\n");
1459 ipr_err("Cache Directory Card Information:\n");
1460 ipr_log_ext_vpd(&error->ioa_vpd);
1461 ipr_err("Adapter Card Information:\n");
1462 ipr_log_ext_vpd(&error->cfc_vpd);
1463
1464 ipr_err("-----Expected Configuration-----\n");
1465 ipr_err("Cache Directory Card Information:\n");
1466 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1467 ipr_err("Adapter Card Information:\n");
1468 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1469
1470 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1471 be32_to_cpu(error->ioa_data[0]),
1472 be32_to_cpu(error->ioa_data[1]),
1473 be32_to_cpu(error->ioa_data[2]));
1474}
1475
1da177e4
LT
1476/**
1477 * ipr_log_cache_error - Log a cache error.
1478 * @ioa_cfg: ioa config struct
1479 * @hostrcb: hostrcb struct
1480 *
1481 * Return value:
1482 * none
1483 **/
1484static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1485 struct ipr_hostrcb *hostrcb)
1486{
1487 struct ipr_hostrcb_type_02_error *error =
1488 &hostrcb->hcam.u.error.u.type_02_error;
1489
1490 ipr_err("-----Current Configuration-----\n");
1491 ipr_err("Cache Directory Card Information:\n");
cfc32139 1492 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1493 ipr_err("Adapter Card Information:\n");
cfc32139 1494 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1495
1496 ipr_err("-----Expected Configuration-----\n");
1497 ipr_err("Cache Directory Card Information:\n");
cfc32139 1498 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1499 ipr_err("Adapter Card Information:\n");
cfc32139 1500 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1501
1502 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1503 be32_to_cpu(error->ioa_data[0]),
1504 be32_to_cpu(error->ioa_data[1]),
1505 be32_to_cpu(error->ioa_data[2]));
1506}
1507
ee0f05b8
BK
1508/**
1509 * ipr_log_enhanced_config_error - Log a configuration error.
1510 * @ioa_cfg: ioa config struct
1511 * @hostrcb: hostrcb struct
1512 *
1513 * Return value:
1514 * none
1515 **/
1516static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1517 struct ipr_hostrcb *hostrcb)
1518{
1519 int errors_logged, i;
1520 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1521 struct ipr_hostrcb_type_13_error *error;
1522
1523 error = &hostrcb->hcam.u.error.u.type_13_error;
1524 errors_logged = be32_to_cpu(error->errors_logged);
1525
1526 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1527 be32_to_cpu(error->errors_detected), errors_logged);
1528
1529 dev_entry = error->dev;
1530
1531 for (i = 0; i < errors_logged; i++, dev_entry++) {
1532 ipr_err_separator;
1533
1534 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1535 ipr_log_ext_vpd(&dev_entry->vpd);
1536
1537 ipr_err("-----New Device Information-----\n");
1538 ipr_log_ext_vpd(&dev_entry->new_vpd);
1539
1540 ipr_err("Cache Directory Card Information:\n");
1541 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1542
1543 ipr_err("Adapter Card Information:\n");
1544 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1545 }
1546}
1547
4565e370
WB
1548/**
1549 * ipr_log_sis64_config_error - Log a device error.
1550 * @ioa_cfg: ioa config struct
1551 * @hostrcb: hostrcb struct
1552 *
1553 * Return value:
1554 * none
1555 **/
1556static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1557 struct ipr_hostrcb *hostrcb)
1558{
1559 int errors_logged, i;
1560 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1561 struct ipr_hostrcb_type_23_error *error;
1562 char buffer[IPR_MAX_RES_PATH_LENGTH];
1563
1564 error = &hostrcb->hcam.u.error64.u.type_23_error;
1565 errors_logged = be32_to_cpu(error->errors_logged);
1566
1567 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1568 be32_to_cpu(error->errors_detected), errors_logged);
1569
1570 dev_entry = error->dev;
1571
1572 for (i = 0; i < errors_logged; i++, dev_entry++) {
1573 ipr_err_separator;
1574
1575 ipr_err("Device %d : %s", i + 1,
1576 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1577 ipr_log_ext_vpd(&dev_entry->vpd);
1578
1579 ipr_err("-----New Device Information-----\n");
1580 ipr_log_ext_vpd(&dev_entry->new_vpd);
1581
1582 ipr_err("Cache Directory Card Information:\n");
1583 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1584
1585 ipr_err("Adapter Card Information:\n");
1586 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1587 }
1588}
1589
1da177e4
LT
1590/**
1591 * ipr_log_config_error - Log a configuration error.
1592 * @ioa_cfg: ioa config struct
1593 * @hostrcb: hostrcb struct
1594 *
1595 * Return value:
1596 * none
1597 **/
1598static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1599 struct ipr_hostrcb *hostrcb)
1600{
1601 int errors_logged, i;
1602 struct ipr_hostrcb_device_data_entry *dev_entry;
1603 struct ipr_hostrcb_type_03_error *error;
1604
1605 error = &hostrcb->hcam.u.error.u.type_03_error;
1606 errors_logged = be32_to_cpu(error->errors_logged);
1607
1608 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1609 be32_to_cpu(error->errors_detected), errors_logged);
1610
cfc32139 1611 dev_entry = error->dev;
1da177e4
LT
1612
1613 for (i = 0; i < errors_logged; i++, dev_entry++) {
1614 ipr_err_separator;
1615
fa15b1f6 1616 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1617 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1618
1619 ipr_err("-----New Device Information-----\n");
cfc32139 1620 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1621
1622 ipr_err("Cache Directory Card Information:\n");
cfc32139 1623 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1624
1625 ipr_err("Adapter Card Information:\n");
cfc32139 1626 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1627
1628 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1629 be32_to_cpu(dev_entry->ioa_data[0]),
1630 be32_to_cpu(dev_entry->ioa_data[1]),
1631 be32_to_cpu(dev_entry->ioa_data[2]),
1632 be32_to_cpu(dev_entry->ioa_data[3]),
1633 be32_to_cpu(dev_entry->ioa_data[4]));
1634 }
1635}
1636
ee0f05b8
BK
1637/**
1638 * ipr_log_enhanced_array_error - Log an array configuration error.
1639 * @ioa_cfg: ioa config struct
1640 * @hostrcb: hostrcb struct
1641 *
1642 * Return value:
1643 * none
1644 **/
1645static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1646 struct ipr_hostrcb *hostrcb)
1647{
1648 int i, num_entries;
1649 struct ipr_hostrcb_type_14_error *error;
1650 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1651 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1652
1653 error = &hostrcb->hcam.u.error.u.type_14_error;
1654
1655 ipr_err_separator;
1656
1657 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1658 error->protection_level,
1659 ioa_cfg->host->host_no,
1660 error->last_func_vset_res_addr.bus,
1661 error->last_func_vset_res_addr.target,
1662 error->last_func_vset_res_addr.lun);
1663
1664 ipr_err_separator;
1665
1666 array_entry = error->array_member;
1667 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1668 sizeof(error->array_member));
1669
1670 for (i = 0; i < num_entries; i++, array_entry++) {
1671 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1672 continue;
1673
1674 if (be32_to_cpu(error->exposed_mode_adn) == i)
1675 ipr_err("Exposed Array Member %d:\n", i);
1676 else
1677 ipr_err("Array Member %d:\n", i);
1678
1679 ipr_log_ext_vpd(&array_entry->vpd);
1680 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1681 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1682 "Expected Location");
1683
1684 ipr_err_separator;
1685 }
1686}
1687
1da177e4
LT
1688/**
1689 * ipr_log_array_error - Log an array configuration error.
1690 * @ioa_cfg: ioa config struct
1691 * @hostrcb: hostrcb struct
1692 *
1693 * Return value:
1694 * none
1695 **/
1696static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1697 struct ipr_hostrcb *hostrcb)
1698{
1699 int i;
1700 struct ipr_hostrcb_type_04_error *error;
1701 struct ipr_hostrcb_array_data_entry *array_entry;
1702 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1703
1704 error = &hostrcb->hcam.u.error.u.type_04_error;
1705
1706 ipr_err_separator;
1707
1708 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1709 error->protection_level,
1710 ioa_cfg->host->host_no,
1711 error->last_func_vset_res_addr.bus,
1712 error->last_func_vset_res_addr.target,
1713 error->last_func_vset_res_addr.lun);
1714
1715 ipr_err_separator;
1716
1717 array_entry = error->array_member;
1718
1719 for (i = 0; i < 18; i++) {
cfc32139 1720 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1721 continue;
1722
fa15b1f6 1723 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1724 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1725 else
1da177e4 1726 ipr_err("Array Member %d:\n", i);
1da177e4 1727
cfc32139 1728 ipr_log_vpd(&array_entry->vpd);
1da177e4 1729
fa15b1f6
BK
1730 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1731 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1732 "Expected Location");
1da177e4
LT
1733
1734 ipr_err_separator;
1735
1736 if (i == 9)
1737 array_entry = error->array_member2;
1738 else
1739 array_entry++;
1740 }
1741}
1742
1743/**
b0df54bb 1744 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1745 * @ioa_cfg: ioa config struct
b0df54bb
BK
1746 * @data: IOA error data
1747 * @len: data length
1da177e4
LT
1748 *
1749 * Return value:
1750 * none
1751 **/
ac719aba 1752static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1da177e4
LT
1753{
1754 int i;
1da177e4 1755
b0df54bb 1756 if (len == 0)
1da177e4
LT
1757 return;
1758
ac719aba
BK
1759 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1760 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1761
b0df54bb 1762 for (i = 0; i < len / 4; i += 4) {
1da177e4 1763 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb
BK
1764 be32_to_cpu(data[i]),
1765 be32_to_cpu(data[i+1]),
1766 be32_to_cpu(data[i+2]),
1767 be32_to_cpu(data[i+3]));
1da177e4
LT
1768 }
1769}
1770
ee0f05b8
BK
1771/**
1772 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1773 * @ioa_cfg: ioa config struct
1774 * @hostrcb: hostrcb struct
1775 *
1776 * Return value:
1777 * none
1778 **/
1779static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1780 struct ipr_hostrcb *hostrcb)
1781{
1782 struct ipr_hostrcb_type_17_error *error;
1783
4565e370
WB
1784 if (ioa_cfg->sis64)
1785 error = &hostrcb->hcam.u.error64.u.type_17_error;
1786 else
1787 error = &hostrcb->hcam.u.error.u.type_17_error;
1788
ee0f05b8 1789 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1790 strim(error->failure_reason);
ee0f05b8 1791
8cf093e2
BK
1792 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1793 be32_to_cpu(hostrcb->hcam.u.error.prc));
1794 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1795 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8
BK
1796 be32_to_cpu(hostrcb->hcam.length) -
1797 (offsetof(struct ipr_hostrcb_error, u) +
1798 offsetof(struct ipr_hostrcb_type_17_error, data)));
1799}
1800
b0df54bb
BK
1801/**
1802 * ipr_log_dual_ioa_error - Log a dual adapter error.
1803 * @ioa_cfg: ioa config struct
1804 * @hostrcb: hostrcb struct
1805 *
1806 * Return value:
1807 * none
1808 **/
1809static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1810 struct ipr_hostrcb *hostrcb)
1811{
1812 struct ipr_hostrcb_type_07_error *error;
1813
1814 error = &hostrcb->hcam.u.error.u.type_07_error;
1815 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1816 strim(error->failure_reason);
b0df54bb 1817
8cf093e2
BK
1818 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1819 be32_to_cpu(hostrcb->hcam.u.error.prc));
1820 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1821 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb
BK
1822 be32_to_cpu(hostrcb->hcam.length) -
1823 (offsetof(struct ipr_hostrcb_error, u) +
1824 offsetof(struct ipr_hostrcb_type_07_error, data)));
1825}
1826
49dc6a18
BK
1827static const struct {
1828 u8 active;
1829 char *desc;
1830} path_active_desc[] = {
1831 { IPR_PATH_NO_INFO, "Path" },
1832 { IPR_PATH_ACTIVE, "Active path" },
1833 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1834};
1835
1836static const struct {
1837 u8 state;
1838 char *desc;
1839} path_state_desc[] = {
1840 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1841 { IPR_PATH_HEALTHY, "is healthy" },
1842 { IPR_PATH_DEGRADED, "is degraded" },
1843 { IPR_PATH_FAILED, "is failed" }
1844};
1845
1846/**
1847 * ipr_log_fabric_path - Log a fabric path error
1848 * @hostrcb: hostrcb struct
1849 * @fabric: fabric descriptor
1850 *
1851 * Return value:
1852 * none
1853 **/
1854static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1855 struct ipr_hostrcb_fabric_desc *fabric)
1856{
1857 int i, j;
1858 u8 path_state = fabric->path_state;
1859 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1860 u8 state = path_state & IPR_PATH_STATE_MASK;
1861
1862 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1863 if (path_active_desc[i].active != active)
1864 continue;
1865
1866 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1867 if (path_state_desc[j].state != state)
1868 continue;
1869
1870 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1871 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1872 path_active_desc[i].desc, path_state_desc[j].desc,
1873 fabric->ioa_port);
1874 } else if (fabric->cascaded_expander == 0xff) {
1875 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1876 path_active_desc[i].desc, path_state_desc[j].desc,
1877 fabric->ioa_port, fabric->phy);
1878 } else if (fabric->phy == 0xff) {
1879 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1880 path_active_desc[i].desc, path_state_desc[j].desc,
1881 fabric->ioa_port, fabric->cascaded_expander);
1882 } else {
1883 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1884 path_active_desc[i].desc, path_state_desc[j].desc,
1885 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1886 }
1887 return;
1888 }
1889 }
1890
1891 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1892 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1893}
1894
4565e370
WB
1895/**
1896 * ipr_log64_fabric_path - Log a fabric path error
1897 * @hostrcb: hostrcb struct
1898 * @fabric: fabric descriptor
1899 *
1900 * Return value:
1901 * none
1902 **/
1903static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1904 struct ipr_hostrcb64_fabric_desc *fabric)
1905{
1906 int i, j;
1907 u8 path_state = fabric->path_state;
1908 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1909 u8 state = path_state & IPR_PATH_STATE_MASK;
1910 char buffer[IPR_MAX_RES_PATH_LENGTH];
1911
1912 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1913 if (path_active_desc[i].active != active)
1914 continue;
1915
1916 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1917 if (path_state_desc[j].state != state)
1918 continue;
1919
1920 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1921 path_active_desc[i].desc, path_state_desc[j].desc,
1922 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1923 return;
1924 }
1925 }
1926
1927 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1928 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1929}
1930
49dc6a18
BK
1931static const struct {
1932 u8 type;
1933 char *desc;
1934} path_type_desc[] = {
1935 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1936 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1937 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1938 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1939};
1940
1941static const struct {
1942 u8 status;
1943 char *desc;
1944} path_status_desc[] = {
1945 { IPR_PATH_CFG_NO_PROB, "Functional" },
1946 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1947 { IPR_PATH_CFG_FAILED, "Failed" },
1948 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1949 { IPR_PATH_NOT_DETECTED, "Missing" },
1950 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1951};
1952
1953static const char *link_rate[] = {
1954 "unknown",
1955 "disabled",
1956 "phy reset problem",
1957 "spinup hold",
1958 "port selector",
1959 "unknown",
1960 "unknown",
1961 "unknown",
1962 "1.5Gbps",
1963 "3.0Gbps",
1964 "unknown",
1965 "unknown",
1966 "unknown",
1967 "unknown",
1968 "unknown",
1969 "unknown"
1970};
1971
1972/**
1973 * ipr_log_path_elem - Log a fabric path element.
1974 * @hostrcb: hostrcb struct
1975 * @cfg: fabric path element struct
1976 *
1977 * Return value:
1978 * none
1979 **/
1980static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1981 struct ipr_hostrcb_config_element *cfg)
1982{
1983 int i, j;
1984 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1985 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1986
1987 if (type == IPR_PATH_CFG_NOT_EXIST)
1988 return;
1989
1990 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1991 if (path_type_desc[i].type != type)
1992 continue;
1993
1994 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1995 if (path_status_desc[j].status != status)
1996 continue;
1997
1998 if (type == IPR_PATH_CFG_IOA_PORT) {
1999 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2000 path_status_desc[j].desc, path_type_desc[i].desc,
2001 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2002 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2003 } else {
2004 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2005 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2006 path_status_desc[j].desc, path_type_desc[i].desc,
2007 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009 } else if (cfg->cascaded_expander == 0xff) {
2010 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2011 "WWN=%08X%08X\n", path_status_desc[j].desc,
2012 path_type_desc[i].desc, cfg->phy,
2013 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2014 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2015 } else if (cfg->phy == 0xff) {
2016 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2017 "WWN=%08X%08X\n", path_status_desc[j].desc,
2018 path_type_desc[i].desc, cfg->cascaded_expander,
2019 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2020 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2021 } else {
2022 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2023 "WWN=%08X%08X\n", path_status_desc[j].desc,
2024 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2025 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2026 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027 }
2028 }
2029 return;
2030 }
2031 }
2032
2033 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2034 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2035 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2036 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2037}
2038
4565e370
WB
2039/**
2040 * ipr_log64_path_elem - Log a fabric path element.
2041 * @hostrcb: hostrcb struct
2042 * @cfg: fabric path element struct
2043 *
2044 * Return value:
2045 * none
2046 **/
2047static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2048 struct ipr_hostrcb64_config_element *cfg)
2049{
2050 int i, j;
2051 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2052 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2053 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2054 char buffer[IPR_MAX_RES_PATH_LENGTH];
2055
2056 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2057 return;
2058
2059 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2060 if (path_type_desc[i].type != type)
2061 continue;
2062
2063 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2064 if (path_status_desc[j].status != status)
2065 continue;
2066
2067 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2068 path_status_desc[j].desc, path_type_desc[i].desc,
2069 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2070 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2071 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2072 return;
2073 }
2074 }
2075 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2076 "WWN=%08X%08X\n", cfg->type_status,
2077 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2078 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2079 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2080}
2081
49dc6a18
BK
2082/**
2083 * ipr_log_fabric_error - Log a fabric error.
2084 * @ioa_cfg: ioa config struct
2085 * @hostrcb: hostrcb struct
2086 *
2087 * Return value:
2088 * none
2089 **/
2090static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2091 struct ipr_hostrcb *hostrcb)
2092{
2093 struct ipr_hostrcb_type_20_error *error;
2094 struct ipr_hostrcb_fabric_desc *fabric;
2095 struct ipr_hostrcb_config_element *cfg;
2096 int i, add_len;
2097
2098 error = &hostrcb->hcam.u.error.u.type_20_error;
2099 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2100 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2101
2102 add_len = be32_to_cpu(hostrcb->hcam.length) -
2103 (offsetof(struct ipr_hostrcb_error, u) +
2104 offsetof(struct ipr_hostrcb_type_20_error, desc));
2105
2106 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2107 ipr_log_fabric_path(hostrcb, fabric);
2108 for_each_fabric_cfg(fabric, cfg)
2109 ipr_log_path_elem(hostrcb, cfg);
2110
2111 add_len -= be16_to_cpu(fabric->length);
2112 fabric = (struct ipr_hostrcb_fabric_desc *)
2113 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2114 }
2115
ac719aba 2116 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
2117}
2118
4565e370
WB
2119/**
2120 * ipr_log_sis64_array_error - Log a sis64 array error.
2121 * @ioa_cfg: ioa config struct
2122 * @hostrcb: hostrcb struct
2123 *
2124 * Return value:
2125 * none
2126 **/
2127static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2128 struct ipr_hostrcb *hostrcb)
2129{
2130 int i, num_entries;
2131 struct ipr_hostrcb_type_24_error *error;
2132 struct ipr_hostrcb64_array_data_entry *array_entry;
2133 char buffer[IPR_MAX_RES_PATH_LENGTH];
2134 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2135
2136 error = &hostrcb->hcam.u.error64.u.type_24_error;
2137
2138 ipr_err_separator;
2139
2140 ipr_err("RAID %s Array Configuration: %s\n",
2141 error->protection_level,
2142 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2143
2144 ipr_err_separator;
2145
2146 array_entry = error->array_member;
2147 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2148 sizeof(error->array_member));
2149
2150 for (i = 0; i < num_entries; i++, array_entry++) {
2151
2152 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2153 continue;
2154
2155 if (error->exposed_mode_adn == i)
2156 ipr_err("Exposed Array Member %d:\n", i);
2157 else
2158 ipr_err("Array Member %d:\n", i);
2159
2160 ipr_err("Array Member %d:\n", i);
2161 ipr_log_ext_vpd(&array_entry->vpd);
2162 ipr_err("Current Location: %s",
2163 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2164 ipr_err("Expected Location: %s",
2165 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2166
2167 ipr_err_separator;
2168 }
2169}
2170
2171/**
2172 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2173 * @ioa_cfg: ioa config struct
2174 * @hostrcb: hostrcb struct
2175 *
2176 * Return value:
2177 * none
2178 **/
2179static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2180 struct ipr_hostrcb *hostrcb)
2181{
2182 struct ipr_hostrcb_type_30_error *error;
2183 struct ipr_hostrcb64_fabric_desc *fabric;
2184 struct ipr_hostrcb64_config_element *cfg;
2185 int i, add_len;
2186
2187 error = &hostrcb->hcam.u.error64.u.type_30_error;
2188
2189 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2190 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2191
2192 add_len = be32_to_cpu(hostrcb->hcam.length) -
2193 (offsetof(struct ipr_hostrcb64_error, u) +
2194 offsetof(struct ipr_hostrcb_type_30_error, desc));
2195
2196 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2197 ipr_log64_fabric_path(hostrcb, fabric);
2198 for_each_fabric_cfg(fabric, cfg)
2199 ipr_log64_path_elem(hostrcb, cfg);
2200
2201 add_len -= be16_to_cpu(fabric->length);
2202 fabric = (struct ipr_hostrcb64_fabric_desc *)
2203 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2204 }
2205
2206 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2207}
2208
b0df54bb
BK
2209/**
2210 * ipr_log_generic_error - Log an adapter error.
2211 * @ioa_cfg: ioa config struct
2212 * @hostrcb: hostrcb struct
2213 *
2214 * Return value:
2215 * none
2216 **/
2217static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2218 struct ipr_hostrcb *hostrcb)
2219{
ac719aba 2220 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb
BK
2221 be32_to_cpu(hostrcb->hcam.length));
2222}
2223
1da177e4
LT
2224/**
2225 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2226 * @ioasc: IOASC
2227 *
2228 * This function will return the index of into the ipr_error_table
2229 * for the specified IOASC. If the IOASC is not in the table,
2230 * 0 will be returned, which points to the entry used for unknown errors.
2231 *
2232 * Return value:
2233 * index into the ipr_error_table
2234 **/
2235static u32 ipr_get_error(u32 ioasc)
2236{
2237 int i;
2238
2239 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2240 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2241 return i;
2242
2243 return 0;
2244}
2245
2246/**
2247 * ipr_handle_log_data - Log an adapter error.
2248 * @ioa_cfg: ioa config struct
2249 * @hostrcb: hostrcb struct
2250 *
2251 * This function logs an adapter error to the system.
2252 *
2253 * Return value:
2254 * none
2255 **/
2256static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2257 struct ipr_hostrcb *hostrcb)
2258{
2259 u32 ioasc;
2260 int error_index;
2261
2262 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2263 return;
2264
2265 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2266 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2267
4565e370
WB
2268 if (ioa_cfg->sis64)
2269 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2270 else
2271 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2272
4565e370
WB
2273 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2274 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2275 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2276 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2277 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2278 }
2279
2280 error_index = ipr_get_error(ioasc);
2281
2282 if (!ipr_error_table[error_index].log_hcam)
2283 return;
2284
49dc6a18 2285 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2286
2287 /* Set indication we have logged an error */
2288 ioa_cfg->errors_logged++;
2289
933916f3 2290 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2291 return;
cf852037
BK
2292 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2293 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2294
2295 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2296 case IPR_HOST_RCB_OVERLAY_ID_2:
2297 ipr_log_cache_error(ioa_cfg, hostrcb);
2298 break;
2299 case IPR_HOST_RCB_OVERLAY_ID_3:
2300 ipr_log_config_error(ioa_cfg, hostrcb);
2301 break;
2302 case IPR_HOST_RCB_OVERLAY_ID_4:
2303 case IPR_HOST_RCB_OVERLAY_ID_6:
2304 ipr_log_array_error(ioa_cfg, hostrcb);
2305 break;
b0df54bb
BK
2306 case IPR_HOST_RCB_OVERLAY_ID_7:
2307 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2308 break;
ee0f05b8
BK
2309 case IPR_HOST_RCB_OVERLAY_ID_12:
2310 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2311 break;
2312 case IPR_HOST_RCB_OVERLAY_ID_13:
2313 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2314 break;
2315 case IPR_HOST_RCB_OVERLAY_ID_14:
2316 case IPR_HOST_RCB_OVERLAY_ID_16:
2317 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2318 break;
2319 case IPR_HOST_RCB_OVERLAY_ID_17:
2320 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2321 break;
49dc6a18
BK
2322 case IPR_HOST_RCB_OVERLAY_ID_20:
2323 ipr_log_fabric_error(ioa_cfg, hostrcb);
2324 break;
4565e370
WB
2325 case IPR_HOST_RCB_OVERLAY_ID_23:
2326 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2327 break;
2328 case IPR_HOST_RCB_OVERLAY_ID_24:
2329 case IPR_HOST_RCB_OVERLAY_ID_26:
2330 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2331 break;
2332 case IPR_HOST_RCB_OVERLAY_ID_30:
2333 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2334 break;
cf852037 2335 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2336 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2337 default:
a9cfca96 2338 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2339 break;
2340 }
2341}
2342
2343/**
2344 * ipr_process_error - Op done function for an adapter error log.
2345 * @ipr_cmd: ipr command struct
2346 *
2347 * This function is the op done function for an error log host
2348 * controlled async from the adapter. It will log the error and
2349 * send the HCAM back to the adapter.
2350 *
2351 * Return value:
2352 * none
2353 **/
2354static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2355{
2356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2357 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2358 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4565e370
WB
2359 u32 fd_ioasc;
2360
2361 if (ioa_cfg->sis64)
2362 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2363 else
2364 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4
LT
2365
2366 list_del(&hostrcb->queue);
2367 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2368
2369 if (!ioasc) {
2370 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2371 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2372 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1da177e4
LT
2373 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2374 dev_err(&ioa_cfg->pdev->dev,
2375 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2376 }
2377
2378 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2379}
2380
2381/**
2382 * ipr_timeout - An internally generated op has timed out.
2383 * @ipr_cmd: ipr command struct
2384 *
2385 * This function blocks host requests and initiates an
2386 * adapter reset.
2387 *
2388 * Return value:
2389 * none
2390 **/
2391static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2392{
2393 unsigned long lock_flags = 0;
2394 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2395
2396 ENTER;
2397 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2398
2399 ioa_cfg->errors_logged++;
2400 dev_err(&ioa_cfg->pdev->dev,
2401 "Adapter being reset due to command timeout.\n");
2402
2403 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2404 ioa_cfg->sdt_state = GET_DUMP;
2405
2406 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2407 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2408
2409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2410 LEAVE;
2411}
2412
2413/**
2414 * ipr_oper_timeout - Adapter timed out transitioning to operational
2415 * @ipr_cmd: ipr command struct
2416 *
2417 * This function blocks host requests and initiates an
2418 * adapter reset.
2419 *
2420 * Return value:
2421 * none
2422 **/
2423static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2424{
2425 unsigned long lock_flags = 0;
2426 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2427
2428 ENTER;
2429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2430
2431 ioa_cfg->errors_logged++;
2432 dev_err(&ioa_cfg->pdev->dev,
2433 "Adapter timed out transitioning to operational.\n");
2434
2435 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2436 ioa_cfg->sdt_state = GET_DUMP;
2437
2438 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2439 if (ipr_fastfail)
2440 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2441 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2442 }
2443
2444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2445 LEAVE;
2446}
2447
2448/**
2449 * ipr_reset_reload - Reset/Reload the IOA
2450 * @ioa_cfg: ioa config struct
2451 * @shutdown_type: shutdown type
2452 *
2453 * This function resets the adapter and re-initializes it.
2454 * This function assumes that all new host commands have been stopped.
2455 * Return value:
2456 * SUCCESS / FAILED
2457 **/
2458static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2459 enum ipr_shutdown_type shutdown_type)
2460{
2461 if (!ioa_cfg->in_reset_reload)
2462 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2463
2464 spin_unlock_irq(ioa_cfg->host->host_lock);
2465 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2466 spin_lock_irq(ioa_cfg->host->host_lock);
2467
2468 /* If we got hit with a host reset while we were already resetting
2469 the adapter for some reason, and the reset failed. */
2470 if (ioa_cfg->ioa_is_dead) {
2471 ipr_trace;
2472 return FAILED;
2473 }
2474
2475 return SUCCESS;
2476}
2477
2478/**
2479 * ipr_find_ses_entry - Find matching SES in SES table
2480 * @res: resource entry struct of SES
2481 *
2482 * Return value:
2483 * pointer to SES table entry / NULL on failure
2484 **/
2485static const struct ipr_ses_table_entry *
2486ipr_find_ses_entry(struct ipr_resource_entry *res)
2487{
2488 int i, j, matches;
3e7ebdfa 2489 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2490 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2491
2492 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2493 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2494 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2495 vpids = &res->std_inq_data.vpids;
2496 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2497 matches++;
2498 else
2499 break;
2500 } else
2501 matches++;
2502 }
2503
2504 if (matches == IPR_PROD_ID_LEN)
2505 return ste;
2506 }
2507
2508 return NULL;
2509}
2510
2511/**
2512 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2513 * @ioa_cfg: ioa config struct
2514 * @bus: SCSI bus
2515 * @bus_width: bus width
2516 *
2517 * Return value:
2518 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2519 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2520 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2521 * max 160MHz = max 320MB/sec).
2522 **/
2523static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2524{
2525 struct ipr_resource_entry *res;
2526 const struct ipr_ses_table_entry *ste;
2527 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2528
2529 /* Loop through each config table entry in the config table buffer */
2530 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2531 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2532 continue;
2533
3e7ebdfa 2534 if (bus != res->bus)
1da177e4
LT
2535 continue;
2536
2537 if (!(ste = ipr_find_ses_entry(res)))
2538 continue;
2539
2540 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2541 }
2542
2543 return max_xfer_rate;
2544}
2545
2546/**
2547 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2548 * @ioa_cfg: ioa config struct
2549 * @max_delay: max delay in micro-seconds to wait
2550 *
2551 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2552 *
2553 * Return value:
2554 * 0 on success / other on failure
2555 **/
2556static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2557{
2558 volatile u32 pcii_reg;
2559 int delay = 1;
2560
2561 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2562 while (delay < max_delay) {
2563 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2564
2565 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2566 return 0;
2567
2568 /* udelay cannot be used if delay is more than a few milliseconds */
2569 if ((delay / 1000) > MAX_UDELAY_MS)
2570 mdelay(delay / 1000);
2571 else
2572 udelay(delay);
2573
2574 delay += delay;
2575 }
2576 return -EIO;
2577}
2578
dcbad00e
WB
2579/**
2580 * ipr_get_sis64_dump_data_section - Dump IOA memory
2581 * @ioa_cfg: ioa config struct
2582 * @start_addr: adapter address to dump
2583 * @dest: destination kernel buffer
2584 * @length_in_words: length to dump in 4 byte words
2585 *
2586 * Return value:
2587 * 0 on success
2588 **/
2589static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2590 u32 start_addr,
2591 __be32 *dest, u32 length_in_words)
2592{
2593 int i;
2594
2595 for (i = 0; i < length_in_words; i++) {
2596 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2597 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2598 dest++;
2599 }
2600
2601 return 0;
2602}
2603
1da177e4
LT
2604/**
2605 * ipr_get_ldump_data_section - Dump IOA memory
2606 * @ioa_cfg: ioa config struct
2607 * @start_addr: adapter address to dump
2608 * @dest: destination kernel buffer
2609 * @length_in_words: length to dump in 4 byte words
2610 *
2611 * Return value:
2612 * 0 on success / -EIO on failure
2613 **/
2614static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2615 u32 start_addr,
2616 __be32 *dest, u32 length_in_words)
2617{
2618 volatile u32 temp_pcii_reg;
2619 int i, delay = 0;
2620
dcbad00e
WB
2621 if (ioa_cfg->sis64)
2622 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2623 dest, length_in_words);
2624
1da177e4
LT
2625 /* Write IOA interrupt reg starting LDUMP state */
2626 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2627 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2628
2629 /* Wait for IO debug acknowledge */
2630 if (ipr_wait_iodbg_ack(ioa_cfg,
2631 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2632 dev_err(&ioa_cfg->pdev->dev,
2633 "IOA dump long data transfer timeout\n");
2634 return -EIO;
2635 }
2636
2637 /* Signal LDUMP interlocked - clear IO debug ack */
2638 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2639 ioa_cfg->regs.clr_interrupt_reg);
2640
2641 /* Write Mailbox with starting address */
2642 writel(start_addr, ioa_cfg->ioa_mailbox);
2643
2644 /* Signal address valid - clear IOA Reset alert */
2645 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2646 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2647
2648 for (i = 0; i < length_in_words; i++) {
2649 /* Wait for IO debug acknowledge */
2650 if (ipr_wait_iodbg_ack(ioa_cfg,
2651 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2652 dev_err(&ioa_cfg->pdev->dev,
2653 "IOA dump short data transfer timeout\n");
2654 return -EIO;
2655 }
2656
2657 /* Read data from mailbox and increment destination pointer */
2658 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2659 dest++;
2660
2661 /* For all but the last word of data, signal data received */
2662 if (i < (length_in_words - 1)) {
2663 /* Signal dump data received - Clear IO debug Ack */
2664 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2665 ioa_cfg->regs.clr_interrupt_reg);
2666 }
2667 }
2668
2669 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2670 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2671 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2672
2673 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2674 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2675
2676 /* Signal dump data received - Clear IO debug Ack */
2677 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2678 ioa_cfg->regs.clr_interrupt_reg);
2679
2680 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2681 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2682 temp_pcii_reg =
214777ba 2683 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2684
2685 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2686 return 0;
2687
2688 udelay(10);
2689 delay += 10;
2690 }
2691
2692 return 0;
2693}
2694
2695#ifdef CONFIG_SCSI_IPR_DUMP
2696/**
2697 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2698 * @ioa_cfg: ioa config struct
2699 * @pci_address: adapter address
2700 * @length: length of data to copy
2701 *
2702 * Copy data from PCI adapter to kernel buffer.
2703 * Note: length MUST be a 4 byte multiple
2704 * Return value:
2705 * 0 on success / other on failure
2706 **/
2707static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2708 unsigned long pci_address, u32 length)
2709{
2710 int bytes_copied = 0;
2711 int cur_len, rc, rem_len, rem_page_len;
2712 __be32 *page;
2713 unsigned long lock_flags = 0;
2714 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2715
2716 while (bytes_copied < length &&
2717 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2718 if (ioa_dump->page_offset >= PAGE_SIZE ||
2719 ioa_dump->page_offset == 0) {
2720 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2721
2722 if (!page) {
2723 ipr_trace;
2724 return bytes_copied;
2725 }
2726
2727 ioa_dump->page_offset = 0;
2728 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2729 ioa_dump->next_page_index++;
2730 } else
2731 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2732
2733 rem_len = length - bytes_copied;
2734 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2735 cur_len = min(rem_len, rem_page_len);
2736
2737 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2738 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2739 rc = -EIO;
2740 } else {
2741 rc = ipr_get_ldump_data_section(ioa_cfg,
2742 pci_address + bytes_copied,
2743 &page[ioa_dump->page_offset / 4],
2744 (cur_len / sizeof(u32)));
2745 }
2746 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2747
2748 if (!rc) {
2749 ioa_dump->page_offset += cur_len;
2750 bytes_copied += cur_len;
2751 } else {
2752 ipr_trace;
2753 break;
2754 }
2755 schedule();
2756 }
2757
2758 return bytes_copied;
2759}
2760
2761/**
2762 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2763 * @hdr: dump entry header struct
2764 *
2765 * Return value:
2766 * nothing
2767 **/
2768static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2769{
2770 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2771 hdr->num_elems = 1;
2772 hdr->offset = sizeof(*hdr);
2773 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2774}
2775
2776/**
2777 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2778 * @ioa_cfg: ioa config struct
2779 * @driver_dump: driver dump struct
2780 *
2781 * Return value:
2782 * nothing
2783 **/
2784static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2785 struct ipr_driver_dump *driver_dump)
2786{
2787 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2788
2789 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2790 driver_dump->ioa_type_entry.hdr.len =
2791 sizeof(struct ipr_dump_ioa_type_entry) -
2792 sizeof(struct ipr_dump_entry_header);
2793 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2794 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2795 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2796 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2797 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2798 ucode_vpd->minor_release[1];
2799 driver_dump->hdr.num_entries++;
2800}
2801
2802/**
2803 * ipr_dump_version_data - Fill in the driver version in the dump.
2804 * @ioa_cfg: ioa config struct
2805 * @driver_dump: driver dump struct
2806 *
2807 * Return value:
2808 * nothing
2809 **/
2810static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2811 struct ipr_driver_dump *driver_dump)
2812{
2813 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2814 driver_dump->version_entry.hdr.len =
2815 sizeof(struct ipr_dump_version_entry) -
2816 sizeof(struct ipr_dump_entry_header);
2817 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2818 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2819 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2820 driver_dump->hdr.num_entries++;
2821}
2822
2823/**
2824 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2825 * @ioa_cfg: ioa config struct
2826 * @driver_dump: driver dump struct
2827 *
2828 * Return value:
2829 * nothing
2830 **/
2831static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2832 struct ipr_driver_dump *driver_dump)
2833{
2834 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2835 driver_dump->trace_entry.hdr.len =
2836 sizeof(struct ipr_dump_trace_entry) -
2837 sizeof(struct ipr_dump_entry_header);
2838 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2839 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2840 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2841 driver_dump->hdr.num_entries++;
2842}
2843
2844/**
2845 * ipr_dump_location_data - Fill in the IOA location in the dump.
2846 * @ioa_cfg: ioa config struct
2847 * @driver_dump: driver dump struct
2848 *
2849 * Return value:
2850 * nothing
2851 **/
2852static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2853 struct ipr_driver_dump *driver_dump)
2854{
2855 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2856 driver_dump->location_entry.hdr.len =
2857 sizeof(struct ipr_dump_location_entry) -
2858 sizeof(struct ipr_dump_entry_header);
2859 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2860 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 2861 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
2862 driver_dump->hdr.num_entries++;
2863}
2864
2865/**
2866 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2867 * @ioa_cfg: ioa config struct
2868 * @dump: dump struct
2869 *
2870 * Return value:
2871 * nothing
2872 **/
2873static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2874{
2875 unsigned long start_addr, sdt_word;
2876 unsigned long lock_flags = 0;
2877 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2878 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2879 u32 num_entries, start_off, end_off;
2880 u32 bytes_to_copy, bytes_copied, rc;
2881 struct ipr_sdt *sdt;
dcbad00e 2882 int valid = 1;
1da177e4
LT
2883 int i;
2884
2885 ENTER;
2886
2887 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2888
2889 if (ioa_cfg->sdt_state != GET_DUMP) {
2890 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2891 return;
2892 }
2893
2894 start_addr = readl(ioa_cfg->ioa_mailbox);
2895
dcbad00e 2896 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
2897 dev_err(&ioa_cfg->pdev->dev,
2898 "Invalid dump table format: %lx\n", start_addr);
2899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900 return;
2901 }
2902
2903 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2904
2905 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2906
2907 /* Initialize the overall dump header */
2908 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2909 driver_dump->hdr.num_entries = 1;
2910 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2911 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2912 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2913 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2914
2915 ipr_dump_version_data(ioa_cfg, driver_dump);
2916 ipr_dump_location_data(ioa_cfg, driver_dump);
2917 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2918 ipr_dump_trace_data(ioa_cfg, driver_dump);
2919
2920 /* Update dump_header */
2921 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2922
2923 /* IOA Dump entry */
2924 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
2925 ioa_dump->hdr.len = 0;
2926 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2927 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2928
2929 /* First entries in sdt are actually a list of dump addresses and
2930 lengths to gather the real dump data. sdt represents the pointer
2931 to the ioa generated dump table. Dump data will be extracted based
2932 on entries in this table */
2933 sdt = &ioa_dump->sdt;
2934
2935 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2936 sizeof(struct ipr_sdt) / sizeof(__be32));
2937
2938 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
2939 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2940 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
2941 dev_err(&ioa_cfg->pdev->dev,
2942 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2943 rc, be32_to_cpu(sdt->hdr.state));
2944 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2945 ioa_cfg->sdt_state = DUMP_OBTAINED;
2946 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2947 return;
2948 }
2949
2950 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2951
2952 if (num_entries > IPR_NUM_SDT_ENTRIES)
2953 num_entries = IPR_NUM_SDT_ENTRIES;
2954
2955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2956
2957 for (i = 0; i < num_entries; i++) {
2958 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2959 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2960 break;
2961 }
2962
2963 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
2964 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2965 if (ioa_cfg->sis64)
2966 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2967 else {
2968 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2969 end_off = be32_to_cpu(sdt->entry[i].end_token);
2970
2971 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2972 bytes_to_copy = end_off - start_off;
2973 else
2974 valid = 0;
2975 }
2976 if (valid) {
1da177e4
LT
2977 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2978 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2979 continue;
2980 }
2981
2982 /* Copy data from adapter to driver buffers */
2983 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2984 bytes_to_copy);
2985
2986 ioa_dump->hdr.len += bytes_copied;
2987
2988 if (bytes_copied != bytes_to_copy) {
2989 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2990 break;
2991 }
2992 }
2993 }
2994 }
2995
2996 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2997
2998 /* Update dump_header */
2999 driver_dump->hdr.len += ioa_dump->hdr.len;
3000 wmb();
3001 ioa_cfg->sdt_state = DUMP_OBTAINED;
3002 LEAVE;
3003}
3004
3005#else
3006#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3007#endif
3008
3009/**
3010 * ipr_release_dump - Free adapter dump memory
3011 * @kref: kref struct
3012 *
3013 * Return value:
3014 * nothing
3015 **/
3016static void ipr_release_dump(struct kref *kref)
3017{
3018 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3019 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3020 unsigned long lock_flags = 0;
3021 int i;
3022
3023 ENTER;
3024 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3025 ioa_cfg->dump = NULL;
3026 ioa_cfg->sdt_state = INACTIVE;
3027 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3028
3029 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3030 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3031
3032 kfree(dump);
3033 LEAVE;
3034}
3035
3036/**
3037 * ipr_worker_thread - Worker thread
c4028958 3038 * @work: ioa config struct
1da177e4
LT
3039 *
3040 * Called at task level from a work thread. This function takes care
3041 * of adding and removing device from the mid-layer as configuration
3042 * changes are detected by the adapter.
3043 *
3044 * Return value:
3045 * nothing
3046 **/
c4028958 3047static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3048{
3049 unsigned long lock_flags;
3050 struct ipr_resource_entry *res;
3051 struct scsi_device *sdev;
3052 struct ipr_dump *dump;
c4028958
DH
3053 struct ipr_ioa_cfg *ioa_cfg =
3054 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3055 u8 bus, target, lun;
3056 int did_work;
3057
3058 ENTER;
3059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3060
3061 if (ioa_cfg->sdt_state == GET_DUMP) {
3062 dump = ioa_cfg->dump;
3063 if (!dump) {
3064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3065 return;
3066 }
3067 kref_get(&dump->kref);
3068 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3069 ipr_get_ioa_dump(ioa_cfg, dump);
3070 kref_put(&dump->kref, ipr_release_dump);
3071
3072 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3073 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3074 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3075 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076 return;
3077 }
3078
3079restart:
3080 do {
3081 did_work = 0;
3082 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084 return;
3085 }
3086
3087 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3088 if (res->del_from_ml && res->sdev) {
3089 did_work = 1;
3090 sdev = res->sdev;
3091 if (!scsi_device_get(sdev)) {
1da177e4
LT
3092 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3093 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3094 scsi_remove_device(sdev);
3095 scsi_device_put(sdev);
3096 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3097 }
3098 break;
3099 }
3100 }
3101 } while(did_work);
3102
3103 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3104 if (res->add_to_ml) {
3e7ebdfa
WB
3105 bus = res->bus;
3106 target = res->target;
3107 lun = res->lun;
1121b794 3108 res->add_to_ml = 0;
1da177e4
LT
3109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3110 scsi_add_device(ioa_cfg->host, bus, target, lun);
3111 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3112 goto restart;
3113 }
3114 }
3115
3116 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3117 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3118 LEAVE;
3119}
3120
3121#ifdef CONFIG_SCSI_IPR_TRACE
3122/**
3123 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3124 * @filp: open sysfs file
1da177e4 3125 * @kobj: kobject struct
91a69029 3126 * @bin_attr: bin_attribute struct
1da177e4
LT
3127 * @buf: buffer
3128 * @off: offset
3129 * @count: buffer size
3130 *
3131 * Return value:
3132 * number of bytes printed to buffer
3133 **/
2c3c8bea 3134static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
91a69029
ZR
3135 struct bin_attribute *bin_attr,
3136 char *buf, loff_t off, size_t count)
1da177e4 3137{
ee959b00
TJ
3138 struct device *dev = container_of(kobj, struct device, kobj);
3139 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3140 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3141 unsigned long lock_flags = 0;
d777aaf3 3142 ssize_t ret;
1da177e4
LT
3143
3144 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3145 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3146 IPR_TRACE_SIZE);
1da177e4 3147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3148
3149 return ret;
1da177e4
LT
3150}
3151
3152static struct bin_attribute ipr_trace_attr = {
3153 .attr = {
3154 .name = "trace",
3155 .mode = S_IRUGO,
3156 },
3157 .size = 0,
3158 .read = ipr_read_trace,
3159};
3160#endif
3161
3162/**
3163 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3164 * @dev: class device struct
3165 * @buf: buffer
1da177e4
LT
3166 *
3167 * Return value:
3168 * number of bytes printed to buffer
3169 **/
ee959b00
TJ
3170static ssize_t ipr_show_fw_version(struct device *dev,
3171 struct device_attribute *attr, char *buf)
1da177e4 3172{
ee959b00 3173 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3174 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3175 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3176 unsigned long lock_flags = 0;
3177 int len;
3178
3179 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3180 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3181 ucode_vpd->major_release, ucode_vpd->card_type,
3182 ucode_vpd->minor_release[0],
3183 ucode_vpd->minor_release[1]);
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3185 return len;
3186}
3187
ee959b00 3188static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3189 .attr = {
3190 .name = "fw_version",
3191 .mode = S_IRUGO,
3192 },
3193 .show = ipr_show_fw_version,
3194};
3195
3196/**
3197 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3198 * @dev: class device struct
3199 * @buf: buffer
1da177e4
LT
3200 *
3201 * Return value:
3202 * number of bytes printed to buffer
3203 **/
ee959b00
TJ
3204static ssize_t ipr_show_log_level(struct device *dev,
3205 struct device_attribute *attr, char *buf)
1da177e4 3206{
ee959b00 3207 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3208 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3209 unsigned long lock_flags = 0;
3210 int len;
3211
3212 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3213 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3214 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3215 return len;
3216}
3217
3218/**
3219 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3220 * @dev: class device struct
3221 * @buf: buffer
1da177e4
LT
3222 *
3223 * Return value:
3224 * number of bytes printed to buffer
3225 **/
ee959b00
TJ
3226static ssize_t ipr_store_log_level(struct device *dev,
3227 struct device_attribute *attr,
1da177e4
LT
3228 const char *buf, size_t count)
3229{
ee959b00 3230 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3231 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3232 unsigned long lock_flags = 0;
3233
3234 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3235 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3236 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3237 return strlen(buf);
3238}
3239
ee959b00 3240static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3241 .attr = {
3242 .name = "log_level",
3243 .mode = S_IRUGO | S_IWUSR,
3244 },
3245 .show = ipr_show_log_level,
3246 .store = ipr_store_log_level
3247};
3248
3249/**
3250 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3251 * @dev: device struct
3252 * @buf: buffer
3253 * @count: buffer size
1da177e4
LT
3254 *
3255 * This function will reset the adapter and wait a reasonable
3256 * amount of time for any errors that the adapter might log.
3257 *
3258 * Return value:
3259 * count on success / other on failure
3260 **/
ee959b00
TJ
3261static ssize_t ipr_store_diagnostics(struct device *dev,
3262 struct device_attribute *attr,
1da177e4
LT
3263 const char *buf, size_t count)
3264{
ee959b00 3265 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3266 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3267 unsigned long lock_flags = 0;
3268 int rc = count;
3269
3270 if (!capable(CAP_SYS_ADMIN))
3271 return -EACCES;
3272
1da177e4 3273 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3274 while(ioa_cfg->in_reset_reload) {
3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3277 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3278 }
3279
1da177e4
LT
3280 ioa_cfg->errors_logged = 0;
3281 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3282
3283 if (ioa_cfg->in_reset_reload) {
3284 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3285 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3286
3287 /* Wait for a second for any errors to be logged */
3288 msleep(1000);
3289 } else {
3290 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3291 return -EIO;
3292 }
3293
3294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3296 rc = -EIO;
3297 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298
3299 return rc;
3300}
3301
ee959b00 3302static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3303 .attr = {
3304 .name = "run_diagnostics",
3305 .mode = S_IWUSR,
3306 },
3307 .store = ipr_store_diagnostics
3308};
3309
f37eb54b
BK
3310/**
3311 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
3312 * @class_dev: device struct
3313 * @buf: buffer
f37eb54b
BK
3314 *
3315 * Return value:
3316 * number of bytes printed to buffer
3317 **/
ee959b00
TJ
3318static ssize_t ipr_show_adapter_state(struct device *dev,
3319 struct device_attribute *attr, char *buf)
f37eb54b 3320{
ee959b00 3321 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3322 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3323 unsigned long lock_flags = 0;
3324 int len;
3325
3326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3327 if (ioa_cfg->ioa_is_dead)
3328 len = snprintf(buf, PAGE_SIZE, "offline\n");
3329 else
3330 len = snprintf(buf, PAGE_SIZE, "online\n");
3331 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3332 return len;
3333}
3334
3335/**
3336 * ipr_store_adapter_state - Change adapter state
ee959b00
TJ
3337 * @dev: device struct
3338 * @buf: buffer
3339 * @count: buffer size
f37eb54b
BK
3340 *
3341 * This function will change the adapter's state.
3342 *
3343 * Return value:
3344 * count on success / other on failure
3345 **/
ee959b00
TJ
3346static ssize_t ipr_store_adapter_state(struct device *dev,
3347 struct device_attribute *attr,
f37eb54b
BK
3348 const char *buf, size_t count)
3349{
ee959b00 3350 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b
BK
3351 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3352 unsigned long lock_flags;
3353 int result = count;
3354
3355 if (!capable(CAP_SYS_ADMIN))
3356 return -EACCES;
3357
3358 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3359 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3360 ioa_cfg->ioa_is_dead = 0;
3361 ioa_cfg->reset_retries = 0;
3362 ioa_cfg->in_ioa_bringdown = 0;
3363 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3364 }
3365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3366 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3367
3368 return result;
3369}
3370
ee959b00 3371static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3372 .attr = {
49dd0961 3373 .name = "online_state",
f37eb54b
BK
3374 .mode = S_IRUGO | S_IWUSR,
3375 },
3376 .show = ipr_show_adapter_state,
3377 .store = ipr_store_adapter_state
3378};
3379
1da177e4
LT
3380/**
3381 * ipr_store_reset_adapter - Reset the adapter
ee959b00
TJ
3382 * @dev: device struct
3383 * @buf: buffer
3384 * @count: buffer size
1da177e4
LT
3385 *
3386 * This function will reset the adapter.
3387 *
3388 * Return value:
3389 * count on success / other on failure
3390 **/
ee959b00
TJ
3391static ssize_t ipr_store_reset_adapter(struct device *dev,
3392 struct device_attribute *attr,
1da177e4
LT
3393 const char *buf, size_t count)
3394{
ee959b00 3395 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3396 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3397 unsigned long lock_flags;
3398 int result = count;
3399
3400 if (!capable(CAP_SYS_ADMIN))
3401 return -EACCES;
3402
3403 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404 if (!ioa_cfg->in_reset_reload)
3405 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3407 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3408
3409 return result;
3410}
3411
ee959b00 3412static struct device_attribute ipr_ioa_reset_attr = {
1da177e4
LT
3413 .attr = {
3414 .name = "reset_host",
3415 .mode = S_IWUSR,
3416 },
3417 .store = ipr_store_reset_adapter
3418};
3419
3420/**
3421 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3422 * @buf_len: buffer length
3423 *
3424 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3425 * list to use for microcode download
3426 *
3427 * Return value:
3428 * pointer to sglist / NULL on failure
3429 **/
3430static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3431{
3432 int sg_size, order, bsize_elem, num_elem, i, j;
3433 struct ipr_sglist *sglist;
3434 struct scatterlist *scatterlist;
3435 struct page *page;
3436
3437 /* Get the minimum size per scatter/gather element */
3438 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3439
3440 /* Get the actual size per element */
3441 order = get_order(sg_size);
3442
3443 /* Determine the actual number of bytes per element */
3444 bsize_elem = PAGE_SIZE * (1 << order);
3445
3446 /* Determine the actual number of sg entries needed */
3447 if (buf_len % bsize_elem)
3448 num_elem = (buf_len / bsize_elem) + 1;
3449 else
3450 num_elem = buf_len / bsize_elem;
3451
3452 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3453 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
3454 (sizeof(struct scatterlist) * (num_elem - 1)),
3455 GFP_KERNEL);
3456
3457 if (sglist == NULL) {
3458 ipr_trace;
3459 return NULL;
3460 }
3461
1da177e4 3462 scatterlist = sglist->scatterlist;
45711f1a 3463 sg_init_table(scatterlist, num_elem);
1da177e4
LT
3464
3465 sglist->order = order;
3466 sglist->num_sg = num_elem;
3467
3468 /* Allocate a bunch of sg elements */
3469 for (i = 0; i < num_elem; i++) {
3470 page = alloc_pages(GFP_KERNEL, order);
3471 if (!page) {
3472 ipr_trace;
3473
3474 /* Free up what we already allocated */
3475 for (j = i - 1; j >= 0; j--)
45711f1a 3476 __free_pages(sg_page(&scatterlist[j]), order);
1da177e4
LT
3477 kfree(sglist);
3478 return NULL;
3479 }
3480
642f1490 3481 sg_set_page(&scatterlist[i], page, 0, 0);
1da177e4
LT
3482 }
3483
3484 return sglist;
3485}
3486
3487/**
3488 * ipr_free_ucode_buffer - Frees a microcode download buffer
3489 * @p_dnld: scatter/gather list pointer
3490 *
3491 * Free a DMA'able ucode download buffer previously allocated with
3492 * ipr_alloc_ucode_buffer
3493 *
3494 * Return value:
3495 * nothing
3496 **/
3497static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3498{
3499 int i;
3500
3501 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3502 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
1da177e4
LT
3503
3504 kfree(sglist);
3505}
3506
3507/**
3508 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3509 * @sglist: scatter/gather list pointer
3510 * @buffer: buffer pointer
3511 * @len: buffer length
3512 *
3513 * Copy a microcode image from a user buffer into a buffer allocated by
3514 * ipr_alloc_ucode_buffer
3515 *
3516 * Return value:
3517 * 0 on success / other on failure
3518 **/
3519static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3520 u8 *buffer, u32 len)
3521{
3522 int bsize_elem, i, result = 0;
3523 struct scatterlist *scatterlist;
3524 void *kaddr;
3525
3526 /* Determine the actual number of bytes per element */
3527 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3528
3529 scatterlist = sglist->scatterlist;
3530
3531 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
45711f1a
JA
3532 struct page *page = sg_page(&scatterlist[i]);
3533
3534 kaddr = kmap(page);
1da177e4 3535 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3536 kunmap(page);
1da177e4
LT
3537
3538 scatterlist[i].length = bsize_elem;
3539
3540 if (result != 0) {
3541 ipr_trace;
3542 return result;
3543 }
3544 }
3545
3546 if (len % bsize_elem) {
45711f1a
JA
3547 struct page *page = sg_page(&scatterlist[i]);
3548
3549 kaddr = kmap(page);
1da177e4 3550 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3551 kunmap(page);
1da177e4
LT
3552
3553 scatterlist[i].length = len % bsize_elem;
3554 }
3555
3556 sglist->buffer_len = len;
3557 return result;
3558}
3559
a32c055f
WB
3560/**
3561 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3562 * @ipr_cmd: ipr command struct
3563 * @sglist: scatter/gather list
3564 *
3565 * Builds a microcode download IOA data list (IOADL).
3566 *
3567 **/
3568static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3569 struct ipr_sglist *sglist)
3570{
3571 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3572 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3573 struct scatterlist *scatterlist = sglist->scatterlist;
3574 int i;
3575
3576 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3577 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3578 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3579
3580 ioarcb->ioadl_len =
3581 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3582 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3583 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3584 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3585 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3586 }
3587
3588 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3589}
3590
1da177e4 3591/**
12baa420 3592 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
3593 * @ipr_cmd: ipr command struct
3594 * @sglist: scatter/gather list
1da177e4 3595 *
12baa420 3596 * Builds a microcode download IOA data list (IOADL).
1da177e4 3597 *
1da177e4 3598 **/
12baa420
BK
3599static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3600 struct ipr_sglist *sglist)
1da177e4 3601{
1da177e4 3602 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3603 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4
LT
3604 struct scatterlist *scatterlist = sglist->scatterlist;
3605 int i;
3606
12baa420 3607 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3608 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
3609 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3610
3611 ioarcb->ioadl_len =
1da177e4
LT
3612 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3613
3614 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3615 ioadl[i].flags_and_data_len =
3616 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3617 ioadl[i].address =
3618 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3619 }
3620
12baa420
BK
3621 ioadl[i-1].flags_and_data_len |=
3622 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3623}
3624
3625/**
3626 * ipr_update_ioa_ucode - Update IOA's microcode
3627 * @ioa_cfg: ioa config struct
3628 * @sglist: scatter/gather list
3629 *
3630 * Initiate an adapter reset to update the IOA's microcode
3631 *
3632 * Return value:
3633 * 0 on success / -EIO on failure
3634 **/
3635static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3636 struct ipr_sglist *sglist)
3637{
3638 unsigned long lock_flags;
3639
3640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3641 while(ioa_cfg->in_reset_reload) {
3642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3643 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3644 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3645 }
12baa420
BK
3646
3647 if (ioa_cfg->ucode_sglist) {
3648 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3649 dev_err(&ioa_cfg->pdev->dev,
3650 "Microcode download already in progress\n");
3651 return -EIO;
1da177e4 3652 }
12baa420
BK
3653
3654 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3655 sglist->num_sg, DMA_TO_DEVICE);
3656
3657 if (!sglist->num_dma_sg) {
3658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3659 dev_err(&ioa_cfg->pdev->dev,
3660 "Failed to map microcode download buffer!\n");
1da177e4
LT
3661 return -EIO;
3662 }
3663
12baa420
BK
3664 ioa_cfg->ucode_sglist = sglist;
3665 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3666 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3668
3669 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3670 ioa_cfg->ucode_sglist = NULL;
3671 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3672 return 0;
3673}
3674
3675/**
3676 * ipr_store_update_fw - Update the firmware on the adapter
ee959b00
TJ
3677 * @class_dev: device struct
3678 * @buf: buffer
3679 * @count: buffer size
1da177e4
LT
3680 *
3681 * This function will update the firmware on the adapter.
3682 *
3683 * Return value:
3684 * count on success / other on failure
3685 **/
ee959b00
TJ
3686static ssize_t ipr_store_update_fw(struct device *dev,
3687 struct device_attribute *attr,
3688 const char *buf, size_t count)
1da177e4 3689{
ee959b00 3690 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3691 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3692 struct ipr_ucode_image_header *image_hdr;
3693 const struct firmware *fw_entry;
3694 struct ipr_sglist *sglist;
1da177e4
LT
3695 char fname[100];
3696 char *src;
3697 int len, result, dnld_size;
3698
3699 if (!capable(CAP_SYS_ADMIN))
3700 return -EACCES;
3701
3702 len = snprintf(fname, 99, "%s", buf);
3703 fname[len-1] = '\0';
3704
3705 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3706 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3707 return -EIO;
3708 }
3709
3710 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3711
3712 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3713 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3714 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3715 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3716 release_firmware(fw_entry);
3717 return -EINVAL;
3718 }
3719
3720 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3721 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3722 sglist = ipr_alloc_ucode_buffer(dnld_size);
3723
3724 if (!sglist) {
3725 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3726 release_firmware(fw_entry);
3727 return -ENOMEM;
3728 }
3729
3730 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3731
3732 if (result) {
3733 dev_err(&ioa_cfg->pdev->dev,
3734 "Microcode buffer copy to DMA buffer failed\n");
12baa420 3735 goto out;
1da177e4
LT
3736 }
3737
12baa420 3738 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 3739
12baa420
BK
3740 if (!result)
3741 result = count;
3742out:
1da177e4
LT
3743 ipr_free_ucode_buffer(sglist);
3744 release_firmware(fw_entry);
12baa420 3745 return result;
1da177e4
LT
3746}
3747
ee959b00 3748static struct device_attribute ipr_update_fw_attr = {
1da177e4
LT
3749 .attr = {
3750 .name = "update_fw",
3751 .mode = S_IWUSR,
3752 },
3753 .store = ipr_store_update_fw
3754};
3755
ee959b00 3756static struct device_attribute *ipr_ioa_attrs[] = {
1da177e4
LT
3757 &ipr_fw_version_attr,
3758 &ipr_log_level_attr,
3759 &ipr_diagnostics_attr,
f37eb54b 3760 &ipr_ioa_state_attr,
1da177e4
LT
3761 &ipr_ioa_reset_attr,
3762 &ipr_update_fw_attr,
3763 NULL,
3764};
3765
3766#ifdef CONFIG_SCSI_IPR_DUMP
3767/**
3768 * ipr_read_dump - Dump the adapter
2c3c8bea 3769 * @filp: open sysfs file
1da177e4 3770 * @kobj: kobject struct
91a69029 3771 * @bin_attr: bin_attribute struct
1da177e4
LT
3772 * @buf: buffer
3773 * @off: offset
3774 * @count: buffer size
3775 *
3776 * Return value:
3777 * number of bytes printed to buffer
3778 **/
2c3c8bea 3779static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
3780 struct bin_attribute *bin_attr,
3781 char *buf, loff_t off, size_t count)
1da177e4 3782{
ee959b00 3783 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
3784 struct Scsi_Host *shost = class_to_shost(cdev);
3785 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3786 struct ipr_dump *dump;
3787 unsigned long lock_flags = 0;
3788 char *src;
3789 int len;
3790 size_t rc = count;
3791
3792 if (!capable(CAP_SYS_ADMIN))
3793 return -EACCES;
3794
3795 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3796 dump = ioa_cfg->dump;
3797
3798 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3800 return 0;
3801 }
3802 kref_get(&dump->kref);
3803 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3804
3805 if (off > dump->driver_dump.hdr.len) {
3806 kref_put(&dump->kref, ipr_release_dump);
3807 return 0;
3808 }
3809
3810 if (off + count > dump->driver_dump.hdr.len) {
3811 count = dump->driver_dump.hdr.len - off;
3812 rc = count;
3813 }
3814
3815 if (count && off < sizeof(dump->driver_dump)) {
3816 if (off + count > sizeof(dump->driver_dump))
3817 len = sizeof(dump->driver_dump) - off;
3818 else
3819 len = count;
3820 src = (u8 *)&dump->driver_dump + off;
3821 memcpy(buf, src, len);
3822 buf += len;
3823 off += len;
3824 count -= len;
3825 }
3826
3827 off -= sizeof(dump->driver_dump);
3828
3829 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3830 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3831 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3832 else
3833 len = count;
3834 src = (u8 *)&dump->ioa_dump + off;
3835 memcpy(buf, src, len);
3836 buf += len;
3837 off += len;
3838 count -= len;
3839 }
3840
3841 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3842
3843 while (count) {
3844 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3845 len = PAGE_ALIGN(off) - off;
3846 else
3847 len = count;
3848 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3849 src += off & ~PAGE_MASK;
3850 memcpy(buf, src, len);
3851 buf += len;
3852 off += len;
3853 count -= len;
3854 }
3855
3856 kref_put(&dump->kref, ipr_release_dump);
3857 return rc;
3858}
3859
3860/**
3861 * ipr_alloc_dump - Prepare for adapter dump
3862 * @ioa_cfg: ioa config struct
3863 *
3864 * Return value:
3865 * 0 on success / other on failure
3866 **/
3867static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3868{
3869 struct ipr_dump *dump;
3870 unsigned long lock_flags = 0;
3871
0bc42e35 3872 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
3873
3874 if (!dump) {
3875 ipr_err("Dump memory allocation failed\n");
3876 return -ENOMEM;
3877 }
3878
1da177e4
LT
3879 kref_init(&dump->kref);
3880 dump->ioa_cfg = ioa_cfg;
3881
3882 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3883
3884 if (INACTIVE != ioa_cfg->sdt_state) {
3885 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3886 kfree(dump);
3887 return 0;
3888 }
3889
3890 ioa_cfg->dump = dump;
3891 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3892 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3893 ioa_cfg->dump_taken = 1;
3894 schedule_work(&ioa_cfg->work_q);
3895 }
3896 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3897
1da177e4
LT
3898 return 0;
3899}
3900
3901/**
3902 * ipr_free_dump - Free adapter dump memory
3903 * @ioa_cfg: ioa config struct
3904 *
3905 * Return value:
3906 * 0 on success / other on failure
3907 **/
3908static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3909{
3910 struct ipr_dump *dump;
3911 unsigned long lock_flags = 0;
3912
3913 ENTER;
3914
3915 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3916 dump = ioa_cfg->dump;
3917 if (!dump) {
3918 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3919 return 0;
3920 }
3921
3922 ioa_cfg->dump = NULL;
3923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3924
3925 kref_put(&dump->kref, ipr_release_dump);
3926
3927 LEAVE;
3928 return 0;
3929}
3930
3931/**
3932 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 3933 * @filp: open sysfs file
1da177e4 3934 * @kobj: kobject struct
91a69029 3935 * @bin_attr: bin_attribute struct
1da177e4
LT
3936 * @buf: buffer
3937 * @off: offset
3938 * @count: buffer size
3939 *
3940 * Return value:
3941 * number of bytes printed to buffer
3942 **/
2c3c8bea 3943static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
3944 struct bin_attribute *bin_attr,
3945 char *buf, loff_t off, size_t count)
1da177e4 3946{
ee959b00 3947 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
3948 struct Scsi_Host *shost = class_to_shost(cdev);
3949 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3950 int rc;
3951
3952 if (!capable(CAP_SYS_ADMIN))
3953 return -EACCES;
3954
3955 if (buf[0] == '1')
3956 rc = ipr_alloc_dump(ioa_cfg);
3957 else if (buf[0] == '0')
3958 rc = ipr_free_dump(ioa_cfg);
3959 else
3960 return -EINVAL;
3961
3962 if (rc)
3963 return rc;
3964 else
3965 return count;
3966}
3967
3968static struct bin_attribute ipr_dump_attr = {
3969 .attr = {
3970 .name = "dump",
3971 .mode = S_IRUSR | S_IWUSR,
3972 },
3973 .size = 0,
3974 .read = ipr_read_dump,
3975 .write = ipr_write_dump
3976};
3977#else
3978static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3979#endif
3980
3981/**
3982 * ipr_change_queue_depth - Change the device's queue depth
3983 * @sdev: scsi device struct
3984 * @qdepth: depth to set
e881a172 3985 * @reason: calling context
1da177e4
LT
3986 *
3987 * Return value:
3988 * actual depth set
3989 **/
e881a172
MC
3990static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3991 int reason)
1da177e4 3992{
35a39691
BK
3993 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3994 struct ipr_resource_entry *res;
3995 unsigned long lock_flags = 0;
3996
e881a172
MC
3997 if (reason != SCSI_QDEPTH_DEFAULT)
3998 return -EOPNOTSUPP;
3999
35a39691
BK
4000 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4001 res = (struct ipr_resource_entry *)sdev->hostdata;
4002
4003 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4004 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4005 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4006
1da177e4
LT
4007 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4008 return sdev->queue_depth;
4009}
4010
4011/**
4012 * ipr_change_queue_type - Change the device's queue type
4013 * @dsev: scsi device struct
4014 * @tag_type: type of tags to use
4015 *
4016 * Return value:
4017 * actual queue type set
4018 **/
4019static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4020{
4021 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4022 struct ipr_resource_entry *res;
4023 unsigned long lock_flags = 0;
4024
4025 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4026 res = (struct ipr_resource_entry *)sdev->hostdata;
4027
4028 if (res) {
4029 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4030 /*
4031 * We don't bother quiescing the device here since the
4032 * adapter firmware does it for us.
4033 */
4034 scsi_set_tag_type(sdev, tag_type);
4035
4036 if (tag_type)
4037 scsi_activate_tcq(sdev, sdev->queue_depth);
4038 else
4039 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4040 } else
4041 tag_type = 0;
4042 } else
4043 tag_type = 0;
4044
4045 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4046 return tag_type;
4047}
4048
4049/**
4050 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4051 * @dev: device struct
4052 * @buf: buffer
4053 *
4054 * Return value:
4055 * number of bytes printed to buffer
4056 **/
10523b3b 4057static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
4058{
4059 struct scsi_device *sdev = to_scsi_device(dev);
4060 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4061 struct ipr_resource_entry *res;
4062 unsigned long lock_flags = 0;
4063 ssize_t len = -ENXIO;
4064
4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066 res = (struct ipr_resource_entry *)sdev->hostdata;
4067 if (res)
3e7ebdfa 4068 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4069 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4070 return len;
4071}
4072
4073static struct device_attribute ipr_adapter_handle_attr = {
4074 .attr = {
4075 .name = "adapter_handle",
4076 .mode = S_IRUSR,
4077 },
4078 .show = ipr_show_adapter_handle
4079};
4080
3e7ebdfa
WB
4081/**
4082 * ipr_show_resource_path - Show the resource path for this device.
4083 * @dev: device struct
4084 * @buf: buffer
4085 *
4086 * Return value:
4087 * number of bytes printed to buffer
4088 **/
4089static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4090{
4091 struct scsi_device *sdev = to_scsi_device(dev);
4092 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4093 struct ipr_resource_entry *res;
4094 unsigned long lock_flags = 0;
4095 ssize_t len = -ENXIO;
4096 char buffer[IPR_MAX_RES_PATH_LENGTH];
4097
4098 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4099 res = (struct ipr_resource_entry *)sdev->hostdata;
4100 if (res)
4101 len = snprintf(buf, PAGE_SIZE, "%s\n",
4102 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4104 return len;
4105}
4106
4107static struct device_attribute ipr_resource_path_attr = {
4108 .attr = {
4109 .name = "resource_path",
4110 .mode = S_IRUSR,
4111 },
4112 .show = ipr_show_resource_path
4113};
4114
1da177e4
LT
4115static struct device_attribute *ipr_dev_attrs[] = {
4116 &ipr_adapter_handle_attr,
3e7ebdfa 4117 &ipr_resource_path_attr,
1da177e4
LT
4118 NULL,
4119};
4120
4121/**
4122 * ipr_biosparam - Return the HSC mapping
4123 * @sdev: scsi device struct
4124 * @block_device: block device pointer
4125 * @capacity: capacity of the device
4126 * @parm: Array containing returned HSC values.
4127 *
4128 * This function generates the HSC parms that fdisk uses.
4129 * We want to make sure we return something that places partitions
4130 * on 4k boundaries for best performance with the IOA.
4131 *
4132 * Return value:
4133 * 0 on success
4134 **/
4135static int ipr_biosparam(struct scsi_device *sdev,
4136 struct block_device *block_device,
4137 sector_t capacity, int *parm)
4138{
4139 int heads, sectors;
4140 sector_t cylinders;
4141
4142 heads = 128;
4143 sectors = 32;
4144
4145 cylinders = capacity;
4146 sector_div(cylinders, (128 * 32));
4147
4148 /* return result */
4149 parm[0] = heads;
4150 parm[1] = sectors;
4151 parm[2] = cylinders;
4152
4153 return 0;
4154}
4155
35a39691
BK
4156/**
4157 * ipr_find_starget - Find target based on bus/target.
4158 * @starget: scsi target struct
4159 *
4160 * Return value:
4161 * resource entry pointer if found / NULL if not found
4162 **/
4163static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4164{
4165 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4166 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4167 struct ipr_resource_entry *res;
4168
4169 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4170 if ((res->bus == starget->channel) &&
4171 (res->target == starget->id) &&
4172 (res->lun == 0)) {
35a39691
BK
4173 return res;
4174 }
4175 }
4176
4177 return NULL;
4178}
4179
4180static struct ata_port_info sata_port_info;
4181
4182/**
4183 * ipr_target_alloc - Prepare for commands to a SCSI target
4184 * @starget: scsi target struct
4185 *
4186 * If the device is a SATA device, this function allocates an
4187 * ATA port with libata, else it does nothing.
4188 *
4189 * Return value:
4190 * 0 on success / non-0 on failure
4191 **/
4192static int ipr_target_alloc(struct scsi_target *starget)
4193{
4194 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4195 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4196 struct ipr_sata_port *sata_port;
4197 struct ata_port *ap;
4198 struct ipr_resource_entry *res;
4199 unsigned long lock_flags;
4200
4201 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4202 res = ipr_find_starget(starget);
4203 starget->hostdata = NULL;
4204
4205 if (res && ipr_is_gata(res)) {
4206 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4207 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4208 if (!sata_port)
4209 return -ENOMEM;
4210
4211 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4212 if (ap) {
4213 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4214 sata_port->ioa_cfg = ioa_cfg;
4215 sata_port->ap = ap;
4216 sata_port->res = res;
4217
4218 res->sata_port = sata_port;
4219 ap->private_data = sata_port;
4220 starget->hostdata = sata_port;
4221 } else {
4222 kfree(sata_port);
4223 return -ENOMEM;
4224 }
4225 }
4226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4227
4228 return 0;
4229}
4230
4231/**
4232 * ipr_target_destroy - Destroy a SCSI target
4233 * @starget: scsi target struct
4234 *
4235 * If the device was a SATA device, this function frees the libata
4236 * ATA port, else it does nothing.
4237 *
4238 **/
4239static void ipr_target_destroy(struct scsi_target *starget)
4240{
4241 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4242 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4243 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4244
4245 if (ioa_cfg->sis64) {
4246 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4247 clear_bit(starget->id, ioa_cfg->array_ids);
4248 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4249 clear_bit(starget->id, ioa_cfg->vset_ids);
4250 else if (starget->channel == 0)
4251 clear_bit(starget->id, ioa_cfg->target_ids);
4252 }
35a39691
BK
4253
4254 if (sata_port) {
4255 starget->hostdata = NULL;
4256 ata_sas_port_destroy(sata_port->ap);
4257 kfree(sata_port);
4258 }
4259}
4260
4261/**
4262 * ipr_find_sdev - Find device based on bus/target/lun.
4263 * @sdev: scsi device struct
4264 *
4265 * Return value:
4266 * resource entry pointer if found / NULL if not found
4267 **/
4268static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4269{
4270 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4271 struct ipr_resource_entry *res;
4272
4273 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4274 if ((res->bus == sdev->channel) &&
4275 (res->target == sdev->id) &&
4276 (res->lun == sdev->lun))
35a39691
BK
4277 return res;
4278 }
4279
4280 return NULL;
4281}
4282
1da177e4
LT
4283/**
4284 * ipr_slave_destroy - Unconfigure a SCSI device
4285 * @sdev: scsi device struct
4286 *
4287 * Return value:
4288 * nothing
4289 **/
4290static void ipr_slave_destroy(struct scsi_device *sdev)
4291{
4292 struct ipr_resource_entry *res;
4293 struct ipr_ioa_cfg *ioa_cfg;
4294 unsigned long lock_flags = 0;
4295
4296 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4297
4298 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4299 res = (struct ipr_resource_entry *) sdev->hostdata;
4300 if (res) {
35a39691 4301 if (res->sata_port)
3e4ec344 4302 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4303 sdev->hostdata = NULL;
4304 res->sdev = NULL;
35a39691 4305 res->sata_port = NULL;
1da177e4
LT
4306 }
4307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4308}
4309
4310/**
4311 * ipr_slave_configure - Configure a SCSI device
4312 * @sdev: scsi device struct
4313 *
4314 * This function configures the specified scsi device.
4315 *
4316 * Return value:
4317 * 0 on success
4318 **/
4319static int ipr_slave_configure(struct scsi_device *sdev)
4320{
4321 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4322 struct ipr_resource_entry *res;
dd406ef8 4323 struct ata_port *ap = NULL;
1da177e4 4324 unsigned long lock_flags = 0;
3e7ebdfa 4325 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4326
4327 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4328 res = sdev->hostdata;
4329 if (res) {
4330 if (ipr_is_af_dasd_device(res))
4331 sdev->type = TYPE_RAID;
0726ce26 4332 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4333 sdev->scsi_level = 4;
0726ce26
BK
4334 sdev->no_uld_attach = 1;
4335 }
1da177e4 4336 if (ipr_is_vset_device(res)) {
242f9dcb
JA
4337 blk_queue_rq_timeout(sdev->request_queue,
4338 IPR_VSET_RW_TIMEOUT);
086fa5ff 4339 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4340 }
e4fbf44e 4341 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
1da177e4 4342 sdev->allow_restart = 1;
dd406ef8
BK
4343 if (ipr_is_gata(res) && res->sata_port)
4344 ap = res->sata_port->ap;
4345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4346
4347 if (ap) {
35a39691 4348 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8
BK
4349 ata_sas_slave_configure(sdev, ap);
4350 } else
35a39691 4351 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3e7ebdfa
WB
4352 if (ioa_cfg->sis64)
4353 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4354 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
dd406ef8 4355 return 0;
1da177e4
LT
4356 }
4357 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4358 return 0;
4359}
4360
35a39691
BK
4361/**
4362 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4363 * @sdev: scsi device struct
4364 *
4365 * This function initializes an ATA port so that future commands
4366 * sent through queuecommand will work.
4367 *
4368 * Return value:
4369 * 0 on success
4370 **/
4371static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4372{
4373 struct ipr_sata_port *sata_port = NULL;
4374 int rc = -ENXIO;
4375
4376 ENTER;
4377 if (sdev->sdev_target)
4378 sata_port = sdev->sdev_target->hostdata;
4379 if (sata_port)
4380 rc = ata_sas_port_init(sata_port->ap);
4381 if (rc)
4382 ipr_slave_destroy(sdev);
4383
4384 LEAVE;
4385 return rc;
4386}
4387
1da177e4
LT
4388/**
4389 * ipr_slave_alloc - Prepare for commands to a device.
4390 * @sdev: scsi device struct
4391 *
4392 * This function saves a pointer to the resource entry
4393 * in the scsi device struct if the device exists. We
4394 * can then use this pointer in ipr_queuecommand when
4395 * handling new commands.
4396 *
4397 * Return value:
692aebfc 4398 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4399 **/
4400static int ipr_slave_alloc(struct scsi_device *sdev)
4401{
4402 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4403 struct ipr_resource_entry *res;
4404 unsigned long lock_flags;
692aebfc 4405 int rc = -ENXIO;
1da177e4
LT
4406
4407 sdev->hostdata = NULL;
4408
4409 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4410
35a39691
BK
4411 res = ipr_find_sdev(sdev);
4412 if (res) {
4413 res->sdev = sdev;
4414 res->add_to_ml = 0;
4415 res->in_erp = 0;
4416 sdev->hostdata = res;
4417 if (!ipr_is_naca_model(res))
4418 res->needs_sync_complete = 1;
4419 rc = 0;
4420 if (ipr_is_gata(res)) {
4421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
4423 }
4424 }
4425
4426 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4427
692aebfc 4428 return rc;
1da177e4
LT
4429}
4430
4431/**
4432 * ipr_eh_host_reset - Reset the host adapter
4433 * @scsi_cmd: scsi command struct
4434 *
4435 * Return value:
4436 * SUCCESS / FAILED
4437 **/
df0ae249 4438static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
4439{
4440 struct ipr_ioa_cfg *ioa_cfg;
4441 int rc;
4442
4443 ENTER;
4444 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4445
4446 dev_err(&ioa_cfg->pdev->dev,
4447 "Adapter being reset as a result of error recovery.\n");
4448
4449 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4450 ioa_cfg->sdt_state = GET_DUMP;
4451
4452 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4453
4454 LEAVE;
4455 return rc;
4456}
4457
df0ae249
JG
4458static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4459{
4460 int rc;
4461
4462 spin_lock_irq(cmd->device->host->host_lock);
4463 rc = __ipr_eh_host_reset(cmd);
4464 spin_unlock_irq(cmd->device->host->host_lock);
4465
4466 return rc;
4467}
4468
c6513096
BK
4469/**
4470 * ipr_device_reset - Reset the device
4471 * @ioa_cfg: ioa config struct
4472 * @res: resource entry struct
4473 *
4474 * This function issues a device reset to the affected device.
4475 * If the device is a SCSI device, a LUN reset will be sent
4476 * to the device first. If that does not work, a target reset
35a39691
BK
4477 * will be sent. If the device is a SATA device, a PHY reset will
4478 * be sent.
c6513096
BK
4479 *
4480 * Return value:
4481 * 0 on success / non-zero on failure
4482 **/
4483static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4484 struct ipr_resource_entry *res)
4485{
4486 struct ipr_cmnd *ipr_cmd;
4487 struct ipr_ioarcb *ioarcb;
4488 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4489 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
4490 u32 ioasc;
4491
4492 ENTER;
4493 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4494 ioarcb = &ipr_cmd->ioarcb;
4495 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
4496
4497 if (ipr_cmd->ioa_cfg->sis64) {
4498 regs = &ipr_cmd->i.ata_ioadl.regs;
4499 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4500 } else
4501 regs = &ioarcb->u.add_data.u.regs;
c6513096 4502
3e7ebdfa 4503 ioarcb->res_handle = res->res_handle;
c6513096
BK
4504 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4505 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
4506 if (ipr_is_gata(res)) {
4507 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4508 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
4509 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4510 }
c6513096
BK
4511
4512 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 4513 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
c6513096 4514 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
96d21f00
WB
4515 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4516 if (ipr_cmd->ioa_cfg->sis64)
4517 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4518 sizeof(struct ipr_ioasa_gata));
4519 else
4520 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4521 sizeof(struct ipr_ioasa_gata));
4522 }
c6513096
BK
4523
4524 LEAVE;
4525 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4526}
4527
35a39691
BK
4528/**
4529 * ipr_sata_reset - Reset the SATA port
cc0680a5 4530 * @link: SATA link to reset
35a39691
BK
4531 * @classes: class of the attached device
4532 *
cc0680a5 4533 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
4534 *
4535 * Return value:
4536 * 0 on success / non-zero on failure
4537 **/
cc0680a5 4538static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 4539 unsigned long deadline)
35a39691 4540{
cc0680a5 4541 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
4542 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4543 struct ipr_resource_entry *res;
4544 unsigned long lock_flags = 0;
4545 int rc = -ENXIO;
4546
4547 ENTER;
4548 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
73d98ff0
BK
4549 while(ioa_cfg->in_reset_reload) {
4550 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4551 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4552 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4553 }
4554
35a39691
BK
4555 res = sata_port->res;
4556 if (res) {
4557 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 4558 *classes = res->ata_class;
35a39691
BK
4559 }
4560
4561 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4562 LEAVE;
4563 return rc;
4564}
4565
1da177e4
LT
4566/**
4567 * ipr_eh_dev_reset - Reset the device
4568 * @scsi_cmd: scsi command struct
4569 *
4570 * This function issues a device reset to the affected device.
4571 * A LUN reset will be sent to the device first. If that does
4572 * not work, a target reset will be sent.
4573 *
4574 * Return value:
4575 * SUCCESS / FAILED
4576 **/
94d0e7b8 4577static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
4578{
4579 struct ipr_cmnd *ipr_cmd;
4580 struct ipr_ioa_cfg *ioa_cfg;
4581 struct ipr_resource_entry *res;
35a39691
BK
4582 struct ata_port *ap;
4583 int rc = 0;
1da177e4
LT
4584
4585 ENTER;
4586 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4587 res = scsi_cmd->device->hostdata;
4588
eeb88307 4589 if (!res)
1da177e4
LT
4590 return FAILED;
4591
4592 /*
4593 * If we are currently going through reset/reload, return failed. This will force the
4594 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4595 * reset to complete
4596 */
4597 if (ioa_cfg->in_reset_reload)
4598 return FAILED;
4599 if (ioa_cfg->ioa_is_dead)
4600 return FAILED;
4601
4602 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4603 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
1da177e4
LT
4604 if (ipr_cmd->scsi_cmd)
4605 ipr_cmd->done = ipr_scsi_eh_done;
24d6f2b5
BK
4606 if (ipr_cmd->qc)
4607 ipr_cmd->done = ipr_sata_eh_done;
7402ecef
BK
4608 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4609 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4610 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4611 }
1da177e4
LT
4612 }
4613 }
4614
4615 res->resetting_device = 1;
fb3ed3cb 4616 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
4617
4618 if (ipr_is_gata(res) && res->sata_port) {
4619 ap = res->sata_port->ap;
4620 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 4621 ata_std_error_handler(ap);
35a39691 4622 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26
BK
4623
4624 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4625 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5af23d26
BK
4626 rc = -EIO;
4627 break;
4628 }
4629 }
35a39691
BK
4630 } else
4631 rc = ipr_device_reset(ioa_cfg, res);
1da177e4
LT
4632 res->resetting_device = 0;
4633
1da177e4 4634 LEAVE;
c6513096 4635 return (rc ? FAILED : SUCCESS);
1da177e4
LT
4636}
4637
94d0e7b8
JG
4638static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4639{
4640 int rc;
4641
4642 spin_lock_irq(cmd->device->host->host_lock);
4643 rc = __ipr_eh_dev_reset(cmd);
4644 spin_unlock_irq(cmd->device->host->host_lock);
4645
4646 return rc;
4647}
4648
1da177e4
LT
4649/**
4650 * ipr_bus_reset_done - Op done function for bus reset.
4651 * @ipr_cmd: ipr command struct
4652 *
4653 * This function is the op done function for a bus reset
4654 *
4655 * Return value:
4656 * none
4657 **/
4658static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4659{
4660 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4661 struct ipr_resource_entry *res;
4662
4663 ENTER;
3e7ebdfa
WB
4664 if (!ioa_cfg->sis64)
4665 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4666 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4667 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4668 break;
4669 }
1da177e4 4670 }
1da177e4
LT
4671
4672 /*
4673 * If abort has not completed, indicate the reset has, else call the
4674 * abort's done function to wake the sleeping eh thread
4675 */
4676 if (ipr_cmd->sibling->sibling)
4677 ipr_cmd->sibling->sibling = NULL;
4678 else
4679 ipr_cmd->sibling->done(ipr_cmd->sibling);
4680
4681 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4682 LEAVE;
4683}
4684
4685/**
4686 * ipr_abort_timeout - An abort task has timed out
4687 * @ipr_cmd: ipr command struct
4688 *
4689 * This function handles when an abort task times out. If this
4690 * happens we issue a bus reset since we have resources tied
4691 * up that must be freed before returning to the midlayer.
4692 *
4693 * Return value:
4694 * none
4695 **/
4696static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4697{
4698 struct ipr_cmnd *reset_cmd;
4699 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4700 struct ipr_cmd_pkt *cmd_pkt;
4701 unsigned long lock_flags = 0;
4702
4703 ENTER;
4704 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4705 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4706 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4707 return;
4708 }
4709
fb3ed3cb 4710 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
4711 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4712 ipr_cmd->sibling = reset_cmd;
4713 reset_cmd->sibling = ipr_cmd;
4714 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4715 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4716 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4717 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4718 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4719
4720 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4722 LEAVE;
4723}
4724
4725/**
4726 * ipr_cancel_op - Cancel specified op
4727 * @scsi_cmd: scsi command struct
4728 *
4729 * This function cancels specified op.
4730 *
4731 * Return value:
4732 * SUCCESS / FAILED
4733 **/
4734static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4735{
4736 struct ipr_cmnd *ipr_cmd;
4737 struct ipr_ioa_cfg *ioa_cfg;
4738 struct ipr_resource_entry *res;
4739 struct ipr_cmd_pkt *cmd_pkt;
4740 u32 ioasc;
4741 int op_found = 0;
4742
4743 ENTER;
4744 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4745 res = scsi_cmd->device->hostdata;
4746
8fa728a2
JG
4747 /* If we are currently going through reset/reload, return failed.
4748 * This will force the mid-layer to call ipr_eh_host_reset,
4749 * which will then go to sleep and wait for the reset to complete
4750 */
4751 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4752 return FAILED;
04d9768f 4753 if (!res || !ipr_is_gscsi(res))
1da177e4
LT
4754 return FAILED;
4755
4756 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4757 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4758 ipr_cmd->done = ipr_scsi_eh_done;
4759 op_found = 1;
4760 break;
4761 }
4762 }
4763
4764 if (!op_found)
4765 return SUCCESS;
4766
4767 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 4768 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
4769 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4770 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4771 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4772 ipr_cmd->u.sdev = scsi_cmd->device;
4773
fb3ed3cb
BK
4774 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4775 scsi_cmd->cmnd[0]);
1da177e4 4776 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 4777 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
4778
4779 /*
4780 * If the abort task timed out and we sent a bus reset, we will get
4781 * one the following responses to the abort
4782 */
4783 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4784 ioasc = 0;
4785 ipr_trace;
4786 }
4787
4788 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ee0a90fa
BK
4789 if (!ipr_is_naca_model(res))
4790 res->needs_sync_complete = 1;
1da177e4
LT
4791
4792 LEAVE;
4793 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4794}
4795
4796/**
4797 * ipr_eh_abort - Abort a single op
4798 * @scsi_cmd: scsi command struct
4799 *
4800 * Return value:
4801 * SUCCESS / FAILED
4802 **/
4803static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4804{
8fa728a2
JG
4805 unsigned long flags;
4806 int rc;
1da177e4
LT
4807
4808 ENTER;
1da177e4 4809
8fa728a2
JG
4810 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4811 rc = ipr_cancel_op(scsi_cmd);
4812 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
4813
4814 LEAVE;
8fa728a2 4815 return rc;
1da177e4
LT
4816}
4817
4818/**
4819 * ipr_handle_other_interrupt - Handle "other" interrupts
4820 * @ioa_cfg: ioa config struct
1da177e4
LT
4821 *
4822 * Return value:
4823 * IRQ_NONE / IRQ_HANDLED
4824 **/
64ffdb76 4825static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
4826{
4827 irqreturn_t rc = IRQ_HANDLED;
64ffdb76
WB
4828 volatile u32 int_reg, int_mask_reg;
4829
4830 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4831 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4832
4833 /* If an interrupt on the adapter did not occur, ignore it.
4834 * Or in the case of SIS 64, check for a stage change interrupt.
4835 */
4836 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4837 if (ioa_cfg->sis64) {
4838 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4839 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4840 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4841
4842 /* clear stage change */
4843 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4844 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4845 list_del(&ioa_cfg->reset_cmd->queue);
4846 del_timer(&ioa_cfg->reset_cmd->timer);
4847 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4848 return IRQ_HANDLED;
4849 }
4850 }
4851
4852 return IRQ_NONE;
4853 }
1da177e4
LT
4854
4855 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4856 /* Mask the interrupt */
4857 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4858
4859 /* Clear the interrupt */
4860 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4861 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4862
4863 list_del(&ioa_cfg->reset_cmd->queue);
4864 del_timer(&ioa_cfg->reset_cmd->timer);
4865 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4866 } else {
4867 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4868 ioa_cfg->ioa_unit_checked = 1;
4869 else
4870 dev_err(&ioa_cfg->pdev->dev,
4871 "Permanent IOA failure. 0x%08X\n", int_reg);
4872
4873 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4874 ioa_cfg->sdt_state = GET_DUMP;
4875
4876 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4877 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4878 }
4879
4880 return rc;
4881}
4882
3feeb89d
WB
4883/**
4884 * ipr_isr_eh - Interrupt service routine error handler
4885 * @ioa_cfg: ioa config struct
4886 * @msg: message to log
4887 *
4888 * Return value:
4889 * none
4890 **/
4891static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4892{
4893 ioa_cfg->errors_logged++;
4894 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4895
4896 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4897 ioa_cfg->sdt_state = GET_DUMP;
4898
4899 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4900}
4901
1da177e4
LT
4902/**
4903 * ipr_isr - Interrupt service routine
4904 * @irq: irq number
4905 * @devp: pointer to ioa config struct
1da177e4
LT
4906 *
4907 * Return value:
4908 * IRQ_NONE / IRQ_HANDLED
4909 **/
7d12e780 4910static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4
LT
4911{
4912 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4913 unsigned long lock_flags = 0;
64ffdb76 4914 volatile u32 int_reg;
1da177e4
LT
4915 u32 ioasc;
4916 u16 cmd_index;
3feeb89d 4917 int num_hrrq = 0;
1da177e4
LT
4918 struct ipr_cmnd *ipr_cmd;
4919 irqreturn_t rc = IRQ_NONE;
4920
4921 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4922
4923 /* If interrupts are disabled, ignore the interrupt */
4924 if (!ioa_cfg->allow_interrupts) {
4925 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4926 return IRQ_NONE;
4927 }
4928
1da177e4
LT
4929 while (1) {
4930 ipr_cmd = NULL;
4931
4932 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4933 ioa_cfg->toggle_bit) {
4934
4935 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4936 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4937
4938 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3feeb89d 4939 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
1da177e4
LT
4940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4941 return IRQ_HANDLED;
4942 }
4943
4944 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4945
96d21f00 4946 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
4947
4948 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4949
4950 list_del(&ipr_cmd->queue);
4951 del_timer(&ipr_cmd->timer);
4952 ipr_cmd->done(ipr_cmd);
4953
4954 rc = IRQ_HANDLED;
4955
4956 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4957 ioa_cfg->hrrq_curr++;
4958 } else {
4959 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4960 ioa_cfg->toggle_bit ^= 1u;
4961 }
4962 }
4963
4964 if (ipr_cmd != NULL) {
4965 /* Clear the PCI interrupt */
3feeb89d 4966 do {
214777ba 4967 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
64ffdb76 4968 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d
WB
4969 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4970 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4971
4972 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4973 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4975 return IRQ_HANDLED;
4976 }
4977
1da177e4
LT
4978 } else
4979 break;
4980 }
4981
4982 if (unlikely(rc == IRQ_NONE))
64ffdb76 4983 rc = ipr_handle_other_interrupt(ioa_cfg);
1da177e4
LT
4984
4985 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4986 return rc;
4987}
4988
a32c055f
WB
4989/**
4990 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4991 * @ioa_cfg: ioa config struct
4992 * @ipr_cmd: ipr command struct
4993 *
4994 * Return value:
4995 * 0 on success / -1 on failure
4996 **/
4997static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4998 struct ipr_cmnd *ipr_cmd)
4999{
5000 int i, nseg;
5001 struct scatterlist *sg;
5002 u32 length;
5003 u32 ioadl_flags = 0;
5004 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5005 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5006 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5007
5008 length = scsi_bufflen(scsi_cmd);
5009 if (!length)
5010 return 0;
5011
5012 nseg = scsi_dma_map(scsi_cmd);
5013 if (nseg < 0) {
5014 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5015 return -1;
5016 }
5017
5018 ipr_cmd->dma_use_sg = nseg;
5019
438b0331 5020 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5021 ioarcb->ioadl_len =
5022 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5023
a32c055f
WB
5024 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5025 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5026 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5027 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5028 ioadl_flags = IPR_IOADL_FLAGS_READ;
5029
5030 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5031 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5032 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5033 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5034 }
5035
5036 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5037 return 0;
5038}
5039
1da177e4
LT
5040/**
5041 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5042 * @ioa_cfg: ioa config struct
5043 * @ipr_cmd: ipr command struct
5044 *
5045 * Return value:
5046 * 0 on success / -1 on failure
5047 **/
5048static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5049 struct ipr_cmnd *ipr_cmd)
5050{
63015bc9
FT
5051 int i, nseg;
5052 struct scatterlist *sg;
1da177e4
LT
5053 u32 length;
5054 u32 ioadl_flags = 0;
5055 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5056 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5057 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5058
63015bc9
FT
5059 length = scsi_bufflen(scsi_cmd);
5060 if (!length)
1da177e4
LT
5061 return 0;
5062
63015bc9
FT
5063 nseg = scsi_dma_map(scsi_cmd);
5064 if (nseg < 0) {
5065 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5066 return -1;
5067 }
51b1c7e1 5068
63015bc9
FT
5069 ipr_cmd->dma_use_sg = nseg;
5070
5071 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5072 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5073 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5074 ioarcb->data_transfer_length = cpu_to_be32(length);
5075 ioarcb->ioadl_len =
63015bc9
FT
5076 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5077 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5078 ioadl_flags = IPR_IOADL_FLAGS_READ;
5079 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5080 ioarcb->read_ioadl_len =
5081 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5082 }
1da177e4 5083
a32c055f
WB
5084 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5085 ioadl = ioarcb->u.add_data.u.ioadl;
5086 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5087 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5088 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5089 }
1da177e4 5090
63015bc9
FT
5091 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5092 ioadl[i].flags_and_data_len =
5093 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5094 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5095 }
5096
63015bc9
FT
5097 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5098 return 0;
1da177e4
LT
5099}
5100
5101/**
5102 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5103 * @scsi_cmd: scsi command struct
5104 *
5105 * Return value:
5106 * task attributes
5107 **/
5108static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5109{
5110 u8 tag[2];
5111 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5112
5113 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5114 switch (tag[0]) {
5115 case MSG_SIMPLE_TAG:
5116 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5117 break;
5118 case MSG_HEAD_TAG:
5119 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5120 break;
5121 case MSG_ORDERED_TAG:
5122 rc = IPR_FLAGS_LO_ORDERED_TASK;
5123 break;
5124 };
5125 }
5126
5127 return rc;
5128}
5129
5130/**
5131 * ipr_erp_done - Process completion of ERP for a device
5132 * @ipr_cmd: ipr command struct
5133 *
5134 * This function copies the sense buffer into the scsi_cmd
5135 * struct and pushes the scsi_done function.
5136 *
5137 * Return value:
5138 * nothing
5139 **/
5140static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5141{
5142 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5143 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5144 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 5145 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5146
5147 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5148 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
5149 scmd_printk(KERN_ERR, scsi_cmd,
5150 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
5151 } else {
5152 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5153 SCSI_SENSE_BUFFERSIZE);
5154 }
5155
5156 if (res) {
ee0a90fa
BK
5157 if (!ipr_is_naca_model(res))
5158 res->needs_sync_complete = 1;
1da177e4
LT
5159 res->in_erp = 0;
5160 }
63015bc9 5161 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5162 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5163 scsi_cmd->scsi_done(scsi_cmd);
5164}
5165
5166/**
5167 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5168 * @ipr_cmd: ipr command struct
5169 *
5170 * Return value:
5171 * none
5172 **/
5173static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5174{
51b1c7e1 5175 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 5176 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 5177 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
5178
5179 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 5180 ioarcb->data_transfer_length = 0;
1da177e4 5181 ioarcb->read_data_transfer_length = 0;
a32c055f 5182 ioarcb->ioadl_len = 0;
1da177e4 5183 ioarcb->read_ioadl_len = 0;
96d21f00
WB
5184 ioasa->hdr.ioasc = 0;
5185 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
5186
5187 if (ipr_cmd->ioa_cfg->sis64)
5188 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5189 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5190 else {
5191 ioarcb->write_ioadl_addr =
5192 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5193 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5194 }
1da177e4
LT
5195}
5196
5197/**
5198 * ipr_erp_request_sense - Send request sense to a device
5199 * @ipr_cmd: ipr command struct
5200 *
5201 * This function sends a request sense to a device as a result
5202 * of a check condition.
5203 *
5204 * Return value:
5205 * nothing
5206 **/
5207static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5208{
5209 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 5210 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5211
5212 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5213 ipr_erp_done(ipr_cmd);
5214 return;
5215 }
5216
5217 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5218
5219 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5220 cmd_pkt->cdb[0] = REQUEST_SENSE;
5221 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5222 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5223 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5224 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5225
a32c055f
WB
5226 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5227 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
5228
5229 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5230 IPR_REQUEST_SENSE_TIMEOUT * 2);
5231}
5232
5233/**
5234 * ipr_erp_cancel_all - Send cancel all to a device
5235 * @ipr_cmd: ipr command struct
5236 *
5237 * This function sends a cancel all to a device to clear the
5238 * queue. If we are running TCQ on the device, QERR is set to 1,
5239 * which means all outstanding ops have been dropped on the floor.
5240 * Cancel all will return them to us.
5241 *
5242 * Return value:
5243 * nothing
5244 **/
5245static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5246{
5247 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5248 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5249 struct ipr_cmd_pkt *cmd_pkt;
5250
5251 res->in_erp = 1;
5252
5253 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5254
5255 if (!scsi_get_tag_type(scsi_cmd->device)) {
5256 ipr_erp_request_sense(ipr_cmd);
5257 return;
5258 }
5259
5260 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5261 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5262 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5263
5264 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5265 IPR_CANCEL_ALL_TIMEOUT);
5266}
5267
5268/**
5269 * ipr_dump_ioasa - Dump contents of IOASA
5270 * @ioa_cfg: ioa config struct
5271 * @ipr_cmd: ipr command struct
fe964d0a 5272 * @res: resource entry struct
1da177e4
LT
5273 *
5274 * This function is invoked by the interrupt handler when ops
5275 * fail. It will log the IOASA if appropriate. Only called
5276 * for GPDD ops.
5277 *
5278 * Return value:
5279 * none
5280 **/
5281static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 5282 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
5283{
5284 int i;
5285 u16 data_len;
b0692dd4 5286 u32 ioasc, fd_ioasc;
96d21f00 5287 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
5288 __be32 *ioasa_data = (__be32 *)ioasa;
5289 int error_index;
5290
96d21f00
WB
5291 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5292 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5293
5294 if (0 == ioasc)
5295 return;
5296
5297 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5298 return;
5299
b0692dd4
BK
5300 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5301 error_index = ipr_get_error(fd_ioasc);
5302 else
5303 error_index = ipr_get_error(ioasc);
1da177e4
LT
5304
5305 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5306 /* Don't log an error if the IOA already logged one */
96d21f00 5307 if (ioasa->hdr.ilid != 0)
1da177e4
LT
5308 return;
5309
cc9bd5d4
BK
5310 if (!ipr_is_gscsi(res))
5311 return;
5312
1da177e4
LT
5313 if (ipr_error_table[error_index].log_ioasa == 0)
5314 return;
5315 }
5316
fe964d0a 5317 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 5318
96d21f00
WB
5319 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5320 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5321 data_len = sizeof(struct ipr_ioasa64);
5322 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 5323 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
5324
5325 ipr_err("IOASA Dump:\n");
5326
5327 for (i = 0; i < data_len / 4; i += 4) {
5328 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5329 be32_to_cpu(ioasa_data[i]),
5330 be32_to_cpu(ioasa_data[i+1]),
5331 be32_to_cpu(ioasa_data[i+2]),
5332 be32_to_cpu(ioasa_data[i+3]));
5333 }
5334}
5335
5336/**
5337 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5338 * @ioasa: IOASA
5339 * @sense_buf: sense data buffer
5340 *
5341 * Return value:
5342 * none
5343 **/
5344static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5345{
5346 u32 failing_lba;
5347 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5348 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
5349 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5350 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
5351
5352 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5353
5354 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5355 return;
5356
5357 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5358
5359 if (ipr_is_vset_device(res) &&
5360 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5361 ioasa->u.vset.failing_lba_hi != 0) {
5362 sense_buf[0] = 0x72;
5363 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5364 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5365 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5366
5367 sense_buf[7] = 12;
5368 sense_buf[8] = 0;
5369 sense_buf[9] = 0x0A;
5370 sense_buf[10] = 0x80;
5371
5372 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5373
5374 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5375 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5376 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5377 sense_buf[15] = failing_lba & 0x000000ff;
5378
5379 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5380
5381 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5382 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5383 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5384 sense_buf[19] = failing_lba & 0x000000ff;
5385 } else {
5386 sense_buf[0] = 0x70;
5387 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5388 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5389 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5390
5391 /* Illegal request */
5392 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 5393 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
5394 sense_buf[7] = 10; /* additional length */
5395
5396 /* IOARCB was in error */
5397 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5398 sense_buf[15] = 0xC0;
5399 else /* Parameter data was invalid */
5400 sense_buf[15] = 0x80;
5401
5402 sense_buf[16] =
5403 ((IPR_FIELD_POINTER_MASK &
96d21f00 5404 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
5405 sense_buf[17] =
5406 (IPR_FIELD_POINTER_MASK &
96d21f00 5407 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
5408 } else {
5409 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5410 if (ipr_is_vset_device(res))
5411 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5412 else
5413 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5414
5415 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5416 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5417 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5418 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5419 sense_buf[6] = failing_lba & 0x000000ff;
5420 }
5421
5422 sense_buf[7] = 6; /* additional length */
5423 }
5424 }
5425}
5426
ee0a90fa
BK
5427/**
5428 * ipr_get_autosense - Copy autosense data to sense buffer
5429 * @ipr_cmd: ipr command struct
5430 *
5431 * This function copies the autosense buffer to the buffer
5432 * in the scsi_cmd, if there is autosense available.
5433 *
5434 * Return value:
5435 * 1 if autosense was available / 0 if not
5436 **/
5437static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5438{
96d21f00
WB
5439 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5440 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 5441
96d21f00 5442 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
5443 return 0;
5444
96d21f00
WB
5445 if (ipr_cmd->ioa_cfg->sis64)
5446 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5447 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5448 SCSI_SENSE_BUFFERSIZE));
5449 else
5450 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5451 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5452 SCSI_SENSE_BUFFERSIZE));
ee0a90fa
BK
5453 return 1;
5454}
5455
1da177e4
LT
5456/**
5457 * ipr_erp_start - Process an error response for a SCSI op
5458 * @ioa_cfg: ioa config struct
5459 * @ipr_cmd: ipr command struct
5460 *
5461 * This function determines whether or not to initiate ERP
5462 * on the affected device.
5463 *
5464 * Return value:
5465 * nothing
5466 **/
5467static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5468 struct ipr_cmnd *ipr_cmd)
5469{
5470 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5471 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 5472 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 5473 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
5474
5475 if (!res) {
5476 ipr_scsi_eh_done(ipr_cmd);
5477 return;
5478 }
5479
8a048994 5480 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
5481 ipr_gen_sense(ipr_cmd);
5482
cc9bd5d4
BK
5483 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5484
8a048994 5485 switch (masked_ioasc) {
1da177e4 5486 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
5487 if (ipr_is_naca_model(res))
5488 scsi_cmd->result |= (DID_ABORT << 16);
5489 else
5490 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
5491 break;
5492 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 5493 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
5494 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5495 break;
5496 case IPR_IOASC_HW_SEL_TIMEOUT:
5497 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
5498 if (!ipr_is_naca_model(res))
5499 res->needs_sync_complete = 1;
1da177e4
LT
5500 break;
5501 case IPR_IOASC_SYNC_REQUIRED:
5502 if (!res->in_erp)
5503 res->needs_sync_complete = 1;
5504 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5505 break;
5506 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 5507 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
5508 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5509 break;
5510 case IPR_IOASC_BUS_WAS_RESET:
5511 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5512 /*
5513 * Report the bus reset and ask for a retry. The device
5514 * will give CC/UA the next command.
5515 */
5516 if (!res->resetting_device)
5517 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5518 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
5519 if (!ipr_is_naca_model(res))
5520 res->needs_sync_complete = 1;
1da177e4
LT
5521 break;
5522 case IPR_IOASC_HW_DEV_BUS_STATUS:
5523 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5524 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
5525 if (!ipr_get_autosense(ipr_cmd)) {
5526 if (!ipr_is_naca_model(res)) {
5527 ipr_erp_cancel_all(ipr_cmd);
5528 return;
5529 }
5530 }
1da177e4 5531 }
ee0a90fa
BK
5532 if (!ipr_is_naca_model(res))
5533 res->needs_sync_complete = 1;
1da177e4
LT
5534 break;
5535 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5536 break;
5537 default:
5b7304fb
BK
5538 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5539 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 5540 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
5541 res->needs_sync_complete = 1;
5542 break;
5543 }
5544
63015bc9 5545 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5546 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5547 scsi_cmd->scsi_done(scsi_cmd);
5548}
5549
5550/**
5551 * ipr_scsi_done - mid-layer done function
5552 * @ipr_cmd: ipr command struct
5553 *
5554 * This function is invoked by the interrupt handler for
5555 * ops generated by the SCSI mid-layer
5556 *
5557 * Return value:
5558 * none
5559 **/
5560static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5561{
5562 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5563 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 5564 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4 5565
96d21f00 5566 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
5567
5568 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
63015bc9 5569 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4
LT
5570 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5571 scsi_cmd->scsi_done(scsi_cmd);
5572 } else
5573 ipr_erp_start(ioa_cfg, ipr_cmd);
5574}
5575
1da177e4
LT
5576/**
5577 * ipr_queuecommand - Queue a mid-layer request
5578 * @scsi_cmd: scsi command struct
5579 * @done: done function
5580 *
5581 * This function queues a request generated by the mid-layer.
5582 *
5583 * Return value:
5584 * 0 on success
5585 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5586 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5587 **/
5588static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5589 void (*done) (struct scsi_cmnd *))
5590{
5591 struct ipr_ioa_cfg *ioa_cfg;
5592 struct ipr_resource_entry *res;
5593 struct ipr_ioarcb *ioarcb;
5594 struct ipr_cmnd *ipr_cmd;
5595 int rc = 0;
5596
5597 scsi_cmd->scsi_done = done;
5598 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5599 res = scsi_cmd->device->hostdata;
5600 scsi_cmd->result = (DID_OK << 16);
5601
5602 /*
5603 * We are currently blocking all devices due to a host reset
5604 * We have told the host to stop giving us new requests, but
5605 * ERP ops don't count. FIXME
5606 */
5607 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5608 return SCSI_MLQUEUE_HOST_BUSY;
5609
5610 /*
5611 * FIXME - Create scsi_set_host_offline interface
5612 * and the ioa_is_dead check can be removed
5613 */
5614 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5615 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5616 scsi_cmd->result = (DID_NO_CONNECT << 16);
5617 scsi_cmd->scsi_done(scsi_cmd);
5618 return 0;
5619 }
5620
35a39691
BK
5621 if (ipr_is_gata(res) && res->sata_port)
5622 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5623
1da177e4
LT
5624 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5625 ioarcb = &ipr_cmd->ioarcb;
5626 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5627
5628 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5629 ipr_cmd->scsi_cmd = scsi_cmd;
3e7ebdfa 5630 ioarcb->res_handle = res->res_handle;
1da177e4 5631 ipr_cmd->done = ipr_scsi_done;
3e7ebdfa 5632 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
1da177e4
LT
5633
5634 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5635 if (scsi_cmd->underflow == 0)
5636 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5637
5638 if (res->needs_sync_complete) {
5639 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5640 res->needs_sync_complete = 0;
5641 }
5642
5643 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5644 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5645 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5646 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5647 }
5648
5649 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5650 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5651 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5652
a32c055f
WB
5653 if (likely(rc == 0)) {
5654 if (ioa_cfg->sis64)
5655 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5656 else
5657 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5658 }
1da177e4
LT
5659
5660 if (likely(rc == 0)) {
5661 mb();
a32c055f 5662 ipr_send_command(ipr_cmd);
1da177e4
LT
5663 } else {
5664 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5665 return SCSI_MLQUEUE_HOST_BUSY;
5666 }
5667
5668 return 0;
5669}
5670
35a39691
BK
5671/**
5672 * ipr_ioctl - IOCTL handler
5673 * @sdev: scsi device struct
5674 * @cmd: IOCTL cmd
5675 * @arg: IOCTL arg
5676 *
5677 * Return value:
5678 * 0 on success / other on failure
5679 **/
bd705f2d 5680static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
35a39691
BK
5681{
5682 struct ipr_resource_entry *res;
5683
5684 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
5685 if (res && ipr_is_gata(res)) {
5686 if (cmd == HDIO_GET_IDENTITY)
5687 return -ENOTTY;
94be9a58 5688 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 5689 }
35a39691
BK
5690
5691 return -EINVAL;
5692}
5693
1da177e4
LT
5694/**
5695 * ipr_info - Get information about the card/driver
5696 * @scsi_host: scsi host struct
5697 *
5698 * Return value:
5699 * pointer to buffer with description string
5700 **/
5701static const char * ipr_ioa_info(struct Scsi_Host *host)
5702{
5703 static char buffer[512];
5704 struct ipr_ioa_cfg *ioa_cfg;
5705 unsigned long lock_flags = 0;
5706
5707 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5708
5709 spin_lock_irqsave(host->host_lock, lock_flags);
5710 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5711 spin_unlock_irqrestore(host->host_lock, lock_flags);
5712
5713 return buffer;
5714}
5715
5716static struct scsi_host_template driver_template = {
5717 .module = THIS_MODULE,
5718 .name = "IPR",
5719 .info = ipr_ioa_info,
35a39691 5720 .ioctl = ipr_ioctl,
1da177e4
LT
5721 .queuecommand = ipr_queuecommand,
5722 .eh_abort_handler = ipr_eh_abort,
5723 .eh_device_reset_handler = ipr_eh_dev_reset,
5724 .eh_host_reset_handler = ipr_eh_host_reset,
5725 .slave_alloc = ipr_slave_alloc,
5726 .slave_configure = ipr_slave_configure,
5727 .slave_destroy = ipr_slave_destroy,
35a39691
BK
5728 .target_alloc = ipr_target_alloc,
5729 .target_destroy = ipr_target_destroy,
1da177e4
LT
5730 .change_queue_depth = ipr_change_queue_depth,
5731 .change_queue_type = ipr_change_queue_type,
5732 .bios_param = ipr_biosparam,
5733 .can_queue = IPR_MAX_COMMANDS,
5734 .this_id = -1,
5735 .sg_tablesize = IPR_MAX_SGLIST,
5736 .max_sectors = IPR_IOA_MAX_SECTORS,
5737 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5738 .use_clustering = ENABLE_CLUSTERING,
5739 .shost_attrs = ipr_ioa_attrs,
5740 .sdev_attrs = ipr_dev_attrs,
5741 .proc_name = IPR_NAME
5742};
5743
35a39691
BK
5744/**
5745 * ipr_ata_phy_reset - libata phy_reset handler
5746 * @ap: ata port to reset
5747 *
5748 **/
5749static void ipr_ata_phy_reset(struct ata_port *ap)
5750{
5751 unsigned long flags;
5752 struct ipr_sata_port *sata_port = ap->private_data;
5753 struct ipr_resource_entry *res = sata_port->res;
5754 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5755 int rc;
5756
5757 ENTER;
5758 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5759 while(ioa_cfg->in_reset_reload) {
5760 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5761 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5762 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5763 }
5764
5765 if (!ioa_cfg->allow_cmds)
5766 goto out_unlock;
5767
5768 rc = ipr_device_reset(ioa_cfg, res);
5769
5770 if (rc) {
3e4ec344 5771 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
5772 goto out_unlock;
5773 }
5774
3e7ebdfa
WB
5775 ap->link.device[0].class = res->ata_class;
5776 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 5777 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
5778
5779out_unlock:
5780 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5781 LEAVE;
5782}
5783
5784/**
5785 * ipr_ata_post_internal - Cleanup after an internal command
5786 * @qc: ATA queued command
5787 *
5788 * Return value:
5789 * none
5790 **/
5791static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5792{
5793 struct ipr_sata_port *sata_port = qc->ap->private_data;
5794 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5795 struct ipr_cmnd *ipr_cmd;
5796 unsigned long flags;
5797
5798 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
73d98ff0
BK
5799 while(ioa_cfg->in_reset_reload) {
5800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5801 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5802 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5803 }
5804
35a39691
BK
5805 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5806 if (ipr_cmd->qc == qc) {
5807 ipr_device_reset(ioa_cfg, sata_port->res);
5808 break;
5809 }
5810 }
5811 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5812}
5813
35a39691
BK
5814/**
5815 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5816 * @regs: destination
5817 * @tf: source ATA taskfile
5818 *
5819 * Return value:
5820 * none
5821 **/
5822static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5823 struct ata_taskfile *tf)
5824{
5825 regs->feature = tf->feature;
5826 regs->nsect = tf->nsect;
5827 regs->lbal = tf->lbal;
5828 regs->lbam = tf->lbam;
5829 regs->lbah = tf->lbah;
5830 regs->device = tf->device;
5831 regs->command = tf->command;
5832 regs->hob_feature = tf->hob_feature;
5833 regs->hob_nsect = tf->hob_nsect;
5834 regs->hob_lbal = tf->hob_lbal;
5835 regs->hob_lbam = tf->hob_lbam;
5836 regs->hob_lbah = tf->hob_lbah;
5837 regs->ctl = tf->ctl;
5838}
5839
5840/**
5841 * ipr_sata_done - done function for SATA commands
5842 * @ipr_cmd: ipr command struct
5843 *
5844 * This function is invoked by the interrupt handler for
5845 * ops generated by the SCSI mid-layer to SATA devices
5846 *
5847 * Return value:
5848 * none
5849 **/
5850static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5851{
5852 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5853 struct ata_queued_cmd *qc = ipr_cmd->qc;
5854 struct ipr_sata_port *sata_port = qc->ap->private_data;
5855 struct ipr_resource_entry *res = sata_port->res;
96d21f00 5856 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 5857
96d21f00
WB
5858 if (ipr_cmd->ioa_cfg->sis64)
5859 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5860 sizeof(struct ipr_ioasa_gata));
5861 else
5862 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5863 sizeof(struct ipr_ioasa_gata));
35a39691
BK
5864 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5865
96d21f00 5866 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 5867 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
5868
5869 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 5870 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 5871 else
96d21f00 5872 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
35a39691
BK
5873 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5874 ata_qc_complete(qc);
5875}
5876
a32c055f
WB
5877/**
5878 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5879 * @ipr_cmd: ipr command struct
5880 * @qc: ATA queued command
5881 *
5882 **/
5883static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5884 struct ata_queued_cmd *qc)
5885{
5886 u32 ioadl_flags = 0;
5887 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5888 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5889 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5890 int len = qc->nbytes;
5891 struct scatterlist *sg;
5892 unsigned int si;
5893 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5894
5895 if (len == 0)
5896 return;
5897
5898 if (qc->dma_dir == DMA_TO_DEVICE) {
5899 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5900 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5901 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5902 ioadl_flags = IPR_IOADL_FLAGS_READ;
5903
5904 ioarcb->data_transfer_length = cpu_to_be32(len);
5905 ioarcb->ioadl_len =
5906 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5907 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5908 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5909
5910 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5911 ioadl64->flags = cpu_to_be32(ioadl_flags);
5912 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5913 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5914
5915 last_ioadl64 = ioadl64;
5916 ioadl64++;
5917 }
5918
5919 if (likely(last_ioadl64))
5920 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5921}
5922
35a39691
BK
5923/**
5924 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5925 * @ipr_cmd: ipr command struct
5926 * @qc: ATA queued command
5927 *
5928 **/
5929static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5930 struct ata_queued_cmd *qc)
5931{
5932 u32 ioadl_flags = 0;
5933 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5934 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 5935 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 5936 int len = qc->nbytes;
35a39691 5937 struct scatterlist *sg;
ff2aeb1e 5938 unsigned int si;
35a39691
BK
5939
5940 if (len == 0)
5941 return;
5942
5943 if (qc->dma_dir == DMA_TO_DEVICE) {
5944 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5945 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5946 ioarcb->data_transfer_length = cpu_to_be32(len);
5947 ioarcb->ioadl_len =
35a39691
BK
5948 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5949 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5950 ioadl_flags = IPR_IOADL_FLAGS_READ;
5951 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5952 ioarcb->read_ioadl_len =
5953 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5954 }
5955
ff2aeb1e 5956 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
5957 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5958 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
5959
5960 last_ioadl = ioadl;
5961 ioadl++;
35a39691 5962 }
3be6cbd7
JG
5963
5964 if (likely(last_ioadl))
5965 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
5966}
5967
5968/**
5969 * ipr_qc_issue - Issue a SATA qc to a device
5970 * @qc: queued command
5971 *
5972 * Return value:
5973 * 0 if success
5974 **/
5975static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5976{
5977 struct ata_port *ap = qc->ap;
5978 struct ipr_sata_port *sata_port = ap->private_data;
5979 struct ipr_resource_entry *res = sata_port->res;
5980 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5981 struct ipr_cmnd *ipr_cmd;
5982 struct ipr_ioarcb *ioarcb;
5983 struct ipr_ioarcb_ata_regs *regs;
5984
5985 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
0feeed82 5986 return AC_ERR_SYSTEM;
35a39691
BK
5987
5988 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5989 ioarcb = &ipr_cmd->ioarcb;
35a39691 5990
a32c055f
WB
5991 if (ioa_cfg->sis64) {
5992 regs = &ipr_cmd->i.ata_ioadl.regs;
5993 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5994 } else
5995 regs = &ioarcb->u.add_data.u.regs;
5996
5997 memset(regs, 0, sizeof(*regs));
5998 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691
BK
5999
6000 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6001 ipr_cmd->qc = qc;
6002 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 6003 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
6004 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6005 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6006 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 6007 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 6008
a32c055f
WB
6009 if (ioa_cfg->sis64)
6010 ipr_build_ata_ioadl64(ipr_cmd, qc);
6011 else
6012 ipr_build_ata_ioadl(ipr_cmd, qc);
6013
35a39691
BK
6014 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6015 ipr_copy_sata_tf(regs, &qc->tf);
6016 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 6017 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
6018
6019 switch (qc->tf.protocol) {
6020 case ATA_PROT_NODATA:
6021 case ATA_PROT_PIO:
6022 break;
6023
6024 case ATA_PROT_DMA:
6025 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6026 break;
6027
0dc36888
TH
6028 case ATAPI_PROT_PIO:
6029 case ATAPI_PROT_NODATA:
35a39691
BK
6030 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6031 break;
6032
0dc36888 6033 case ATAPI_PROT_DMA:
35a39691
BK
6034 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6035 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6036 break;
6037
6038 default:
6039 WARN_ON(1);
0feeed82 6040 return AC_ERR_INVALID;
35a39691
BK
6041 }
6042
6043 mb();
a32c055f
WB
6044
6045 ipr_send_command(ipr_cmd);
6046
35a39691
BK
6047 return 0;
6048}
6049
4c9bf4e7
TH
6050/**
6051 * ipr_qc_fill_rtf - Read result TF
6052 * @qc: ATA queued command
6053 *
6054 * Return value:
6055 * true
6056 **/
6057static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6058{
6059 struct ipr_sata_port *sata_port = qc->ap->private_data;
6060 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6061 struct ata_taskfile *tf = &qc->result_tf;
6062
6063 tf->feature = g->error;
6064 tf->nsect = g->nsect;
6065 tf->lbal = g->lbal;
6066 tf->lbam = g->lbam;
6067 tf->lbah = g->lbah;
6068 tf->device = g->device;
6069 tf->command = g->status;
6070 tf->hob_nsect = g->hob_nsect;
6071 tf->hob_lbal = g->hob_lbal;
6072 tf->hob_lbam = g->hob_lbam;
6073 tf->hob_lbah = g->hob_lbah;
6074 tf->ctl = g->alt_status;
6075
6076 return true;
6077}
6078
35a39691 6079static struct ata_port_operations ipr_sata_ops = {
35a39691 6080 .phy_reset = ipr_ata_phy_reset,
a1efdaba 6081 .hardreset = ipr_sata_reset,
35a39691 6082 .post_internal_cmd = ipr_ata_post_internal,
35a39691
BK
6083 .qc_prep = ata_noop_qc_prep,
6084 .qc_issue = ipr_qc_issue,
4c9bf4e7 6085 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
6086 .port_start = ata_sas_port_start,
6087 .port_stop = ata_sas_port_stop
6088};
6089
6090static struct ata_port_info sata_port_info = {
6091 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6092 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6093 .pio_mask = 0x10, /* pio4 */
6094 .mwdma_mask = 0x07,
6095 .udma_mask = 0x7f, /* udma0-6 */
6096 .port_ops = &ipr_sata_ops
6097};
6098
1da177e4
LT
6099#ifdef CONFIG_PPC_PSERIES
6100static const u16 ipr_blocked_processors[] = {
6101 PV_NORTHSTAR,
6102 PV_PULSAR,
6103 PV_POWER4,
6104 PV_ICESTAR,
6105 PV_SSTAR,
6106 PV_POWER4p,
6107 PV_630,
6108 PV_630p
6109};
6110
6111/**
6112 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6113 * @ioa_cfg: ioa cfg struct
6114 *
6115 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6116 * certain pSeries hardware. This function determines if the given
6117 * adapter is in one of these confgurations or not.
6118 *
6119 * Return value:
6120 * 1 if adapter is not supported / 0 if adapter is supported
6121 **/
6122static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6123{
1da177e4
LT
6124 int i;
6125
44c10138
AK
6126 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6127 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6128 if (__is_processor(ipr_blocked_processors[i]))
6129 return 1;
1da177e4
LT
6130 }
6131 }
6132 return 0;
6133}
6134#else
6135#define ipr_invalid_adapter(ioa_cfg) 0
6136#endif
6137
6138/**
6139 * ipr_ioa_bringdown_done - IOA bring down completion.
6140 * @ipr_cmd: ipr command struct
6141 *
6142 * This function processes the completion of an adapter bring down.
6143 * It wakes any reset sleepers.
6144 *
6145 * Return value:
6146 * IPR_RC_JOB_RETURN
6147 **/
6148static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6149{
6150 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6151
6152 ENTER;
6153 ioa_cfg->in_reset_reload = 0;
6154 ioa_cfg->reset_retries = 0;
6155 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6156 wake_up_all(&ioa_cfg->reset_wait_q);
6157
6158 spin_unlock_irq(ioa_cfg->host->host_lock);
6159 scsi_unblock_requests(ioa_cfg->host);
6160 spin_lock_irq(ioa_cfg->host->host_lock);
6161 LEAVE;
6162
6163 return IPR_RC_JOB_RETURN;
6164}
6165
6166/**
6167 * ipr_ioa_reset_done - IOA reset completion.
6168 * @ipr_cmd: ipr command struct
6169 *
6170 * This function processes the completion of an adapter reset.
6171 * It schedules any necessary mid-layer add/removes and
6172 * wakes any reset sleepers.
6173 *
6174 * Return value:
6175 * IPR_RC_JOB_RETURN
6176 **/
6177static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6178{
6179 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6180 struct ipr_resource_entry *res;
6181 struct ipr_hostrcb *hostrcb, *temp;
6182 int i = 0;
6183
6184 ENTER;
6185 ioa_cfg->in_reset_reload = 0;
6186 ioa_cfg->allow_cmds = 1;
6187 ioa_cfg->reset_cmd = NULL;
3d1d0da6 6188 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6189
6190 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6191 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6192 ipr_trace;
6193 break;
6194 }
6195 }
6196 schedule_work(&ioa_cfg->work_q);
6197
6198 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6199 list_del(&hostrcb->queue);
6200 if (i++ < IPR_NUM_LOG_HCAMS)
6201 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6202 else
6203 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6204 }
6205
6bb04170 6206 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
6207 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6208
6209 ioa_cfg->reset_retries = 0;
6210 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6211 wake_up_all(&ioa_cfg->reset_wait_q);
6212
30237853 6213 spin_unlock(ioa_cfg->host->host_lock);
1da177e4 6214 scsi_unblock_requests(ioa_cfg->host);
30237853 6215 spin_lock(ioa_cfg->host->host_lock);
1da177e4
LT
6216
6217 if (!ioa_cfg->allow_cmds)
6218 scsi_block_requests(ioa_cfg->host);
6219
6220 LEAVE;
6221 return IPR_RC_JOB_RETURN;
6222}
6223
6224/**
6225 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6226 * @supported_dev: supported device struct
6227 * @vpids: vendor product id struct
6228 *
6229 * Return value:
6230 * none
6231 **/
6232static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6233 struct ipr_std_inq_vpids *vpids)
6234{
6235 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6236 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6237 supported_dev->num_records = 1;
6238 supported_dev->data_length =
6239 cpu_to_be16(sizeof(struct ipr_supported_device));
6240 supported_dev->reserved = 0;
6241}
6242
6243/**
6244 * ipr_set_supported_devs - Send Set Supported Devices for a device
6245 * @ipr_cmd: ipr command struct
6246 *
a32c055f 6247 * This function sends a Set Supported Devices to the adapter
1da177e4
LT
6248 *
6249 * Return value:
6250 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6251 **/
6252static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6253{
6254 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6255 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
6256 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6257 struct ipr_resource_entry *res = ipr_cmd->u.res;
6258
6259 ipr_cmd->job_step = ipr_ioa_reset_done;
6260
6261 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 6262 if (!ipr_is_scsi_disk(res))
1da177e4
LT
6263 continue;
6264
6265 ipr_cmd->u.res = res;
3e7ebdfa 6266 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
6267
6268 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6269 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6270 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6271
6272 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 6273 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
6274 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6275 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6276
a32c055f
WB
6277 ipr_init_ioadl(ipr_cmd,
6278 ioa_cfg->vpd_cbs_dma +
6279 offsetof(struct ipr_misc_cbs, supp_dev),
6280 sizeof(struct ipr_supported_device),
6281 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6282
6283 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6284 IPR_SET_SUP_DEVICE_TIMEOUT);
6285
3e7ebdfa
WB
6286 if (!ioa_cfg->sis64)
6287 ipr_cmd->job_step = ipr_set_supported_devs;
1da177e4
LT
6288 return IPR_RC_JOB_RETURN;
6289 }
6290
6291 return IPR_RC_JOB_CONTINUE;
6292}
6293
6294/**
6295 * ipr_get_mode_page - Locate specified mode page
6296 * @mode_pages: mode page buffer
6297 * @page_code: page code to find
6298 * @len: minimum required length for mode page
6299 *
6300 * Return value:
6301 * pointer to mode page / NULL on failure
6302 **/
6303static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6304 u32 page_code, u32 len)
6305{
6306 struct ipr_mode_page_hdr *mode_hdr;
6307 u32 page_length;
6308 u32 length;
6309
6310 if (!mode_pages || (mode_pages->hdr.length == 0))
6311 return NULL;
6312
6313 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6314 mode_hdr = (struct ipr_mode_page_hdr *)
6315 (mode_pages->data + mode_pages->hdr.block_desc_len);
6316
6317 while (length) {
6318 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6319 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6320 return mode_hdr;
6321 break;
6322 } else {
6323 page_length = (sizeof(struct ipr_mode_page_hdr) +
6324 mode_hdr->page_length);
6325 length -= page_length;
6326 mode_hdr = (struct ipr_mode_page_hdr *)
6327 ((unsigned long)mode_hdr + page_length);
6328 }
6329 }
6330 return NULL;
6331}
6332
6333/**
6334 * ipr_check_term_power - Check for term power errors
6335 * @ioa_cfg: ioa config struct
6336 * @mode_pages: IOAFP mode pages buffer
6337 *
6338 * Check the IOAFP's mode page 28 for term power errors
6339 *
6340 * Return value:
6341 * nothing
6342 **/
6343static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6344 struct ipr_mode_pages *mode_pages)
6345{
6346 int i;
6347 int entry_length;
6348 struct ipr_dev_bus_entry *bus;
6349 struct ipr_mode_page28 *mode_page;
6350
6351 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6352 sizeof(struct ipr_mode_page28));
6353
6354 entry_length = mode_page->entry_length;
6355
6356 bus = mode_page->bus;
6357
6358 for (i = 0; i < mode_page->num_entries; i++) {
6359 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6360 dev_err(&ioa_cfg->pdev->dev,
6361 "Term power is absent on scsi bus %d\n",
6362 bus->res_addr.bus);
6363 }
6364
6365 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6366 }
6367}
6368
6369/**
6370 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6371 * @ioa_cfg: ioa config struct
6372 *
6373 * Looks through the config table checking for SES devices. If
6374 * the SES device is in the SES table indicating a maximum SCSI
6375 * bus speed, the speed is limited for the bus.
6376 *
6377 * Return value:
6378 * none
6379 **/
6380static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6381{
6382 u32 max_xfer_rate;
6383 int i;
6384
6385 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6386 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6387 ioa_cfg->bus_attr[i].bus_width);
6388
6389 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6390 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6391 }
6392}
6393
6394/**
6395 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6396 * @ioa_cfg: ioa config struct
6397 * @mode_pages: mode page 28 buffer
6398 *
6399 * Updates mode page 28 based on driver configuration
6400 *
6401 * Return value:
6402 * none
6403 **/
6404static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6405 struct ipr_mode_pages *mode_pages)
6406{
6407 int i, entry_length;
6408 struct ipr_dev_bus_entry *bus;
6409 struct ipr_bus_attributes *bus_attr;
6410 struct ipr_mode_page28 *mode_page;
6411
6412 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6413 sizeof(struct ipr_mode_page28));
6414
6415 entry_length = mode_page->entry_length;
6416
6417 /* Loop for each device bus entry */
6418 for (i = 0, bus = mode_page->bus;
6419 i < mode_page->num_entries;
6420 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6421 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6422 dev_err(&ioa_cfg->pdev->dev,
6423 "Invalid resource address reported: 0x%08X\n",
6424 IPR_GET_PHYS_LOC(bus->res_addr));
6425 continue;
6426 }
6427
6428 bus_attr = &ioa_cfg->bus_attr[i];
6429 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6430 bus->bus_width = bus_attr->bus_width;
6431 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6432 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6433 if (bus_attr->qas_enabled)
6434 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6435 else
6436 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6437 }
6438}
6439
6440/**
6441 * ipr_build_mode_select - Build a mode select command
6442 * @ipr_cmd: ipr command struct
6443 * @res_handle: resource handle to send command to
6444 * @parm: Byte 2 of Mode Sense command
6445 * @dma_addr: DMA buffer address
6446 * @xfer_len: data transfer length
6447 *
6448 * Return value:
6449 * none
6450 **/
6451static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
6452 __be32 res_handle, u8 parm,
6453 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6454{
1da177e4
LT
6455 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6456
6457 ioarcb->res_handle = res_handle;
6458 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6459 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6460 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6461 ioarcb->cmd_pkt.cdb[1] = parm;
6462 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6463
a32c055f 6464 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
6465}
6466
6467/**
6468 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6469 * @ipr_cmd: ipr command struct
6470 *
6471 * This function sets up the SCSI bus attributes and sends
6472 * a Mode Select for Page 28 to activate them.
6473 *
6474 * Return value:
6475 * IPR_RC_JOB_RETURN
6476 **/
6477static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6478{
6479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6480 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6481 int length;
6482
6483 ENTER;
4733804c
BK
6484 ipr_scsi_bus_speed_limit(ioa_cfg);
6485 ipr_check_term_power(ioa_cfg, mode_pages);
6486 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6487 length = mode_pages->hdr.length + 1;
6488 mode_pages->hdr.length = 0;
1da177e4
LT
6489
6490 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6491 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6492 length);
6493
f72919ec
WB
6494 ipr_cmd->job_step = ipr_set_supported_devs;
6495 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6496 struct ipr_resource_entry, queue);
1da177e4
LT
6497 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6498
6499 LEAVE;
6500 return IPR_RC_JOB_RETURN;
6501}
6502
6503/**
6504 * ipr_build_mode_sense - Builds a mode sense command
6505 * @ipr_cmd: ipr command struct
6506 * @res: resource entry struct
6507 * @parm: Byte 2 of mode sense command
6508 * @dma_addr: DMA address of mode sense buffer
6509 * @xfer_len: Size of DMA buffer
6510 *
6511 * Return value:
6512 * none
6513 **/
6514static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6515 __be32 res_handle,
a32c055f 6516 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 6517{
1da177e4
LT
6518 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6519
6520 ioarcb->res_handle = res_handle;
6521 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6522 ioarcb->cmd_pkt.cdb[2] = parm;
6523 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6524 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6525
a32c055f 6526 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6527}
6528
dfed823e
BK
6529/**
6530 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6531 * @ipr_cmd: ipr command struct
6532 *
6533 * This function handles the failure of an IOA bringup command.
6534 *
6535 * Return value:
6536 * IPR_RC_JOB_RETURN
6537 **/
6538static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6539{
6540 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 6541 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
6542
6543 dev_err(&ioa_cfg->pdev->dev,
6544 "0x%02X failed with IOASC: 0x%08X\n",
6545 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6546
6547 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6548 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6549 return IPR_RC_JOB_RETURN;
6550}
6551
6552/**
6553 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6554 * @ipr_cmd: ipr command struct
6555 *
6556 * This function handles the failure of a Mode Sense to the IOAFP.
6557 * Some adapters do not handle all mode pages.
6558 *
6559 * Return value:
6560 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6561 **/
6562static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6563{
f72919ec 6564 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 6565 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e
BK
6566
6567 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
6568 ipr_cmd->job_step = ipr_set_supported_devs;
6569 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6570 struct ipr_resource_entry, queue);
dfed823e
BK
6571 return IPR_RC_JOB_CONTINUE;
6572 }
6573
6574 return ipr_reset_cmd_failed(ipr_cmd);
6575}
6576
1da177e4
LT
6577/**
6578 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6579 * @ipr_cmd: ipr command struct
6580 *
6581 * This function send a Page 28 mode sense to the IOA to
6582 * retrieve SCSI bus attributes.
6583 *
6584 * Return value:
6585 * IPR_RC_JOB_RETURN
6586 **/
6587static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6588{
6589 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6590
6591 ENTER;
6592 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6593 0x28, ioa_cfg->vpd_cbs_dma +
6594 offsetof(struct ipr_misc_cbs, mode_pages),
6595 sizeof(struct ipr_mode_pages));
6596
6597 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 6598 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
6599
6600 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6601
6602 LEAVE;
6603 return IPR_RC_JOB_RETURN;
6604}
6605
ac09c349
BK
6606/**
6607 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6608 * @ipr_cmd: ipr command struct
6609 *
6610 * This function enables dual IOA RAID support if possible.
6611 *
6612 * Return value:
6613 * IPR_RC_JOB_RETURN
6614 **/
6615static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6616{
6617 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6618 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6619 struct ipr_mode_page24 *mode_page;
6620 int length;
6621
6622 ENTER;
6623 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6624 sizeof(struct ipr_mode_page24));
6625
6626 if (mode_page)
6627 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6628
6629 length = mode_pages->hdr.length + 1;
6630 mode_pages->hdr.length = 0;
6631
6632 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6633 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6634 length);
6635
6636 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6637 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6638
6639 LEAVE;
6640 return IPR_RC_JOB_RETURN;
6641}
6642
6643/**
6644 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6645 * @ipr_cmd: ipr command struct
6646 *
6647 * This function handles the failure of a Mode Sense to the IOAFP.
6648 * Some adapters do not handle all mode pages.
6649 *
6650 * Return value:
6651 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6652 **/
6653static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6654{
96d21f00 6655 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
6656
6657 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6658 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6659 return IPR_RC_JOB_CONTINUE;
6660 }
6661
6662 return ipr_reset_cmd_failed(ipr_cmd);
6663}
6664
6665/**
6666 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6667 * @ipr_cmd: ipr command struct
6668 *
6669 * This function send a mode sense to the IOA to retrieve
6670 * the IOA Advanced Function Control mode page.
6671 *
6672 * Return value:
6673 * IPR_RC_JOB_RETURN
6674 **/
6675static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6676{
6677 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6678
6679 ENTER;
6680 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6681 0x24, ioa_cfg->vpd_cbs_dma +
6682 offsetof(struct ipr_misc_cbs, mode_pages),
6683 sizeof(struct ipr_mode_pages));
6684
6685 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6686 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6687
6688 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6689
6690 LEAVE;
6691 return IPR_RC_JOB_RETURN;
6692}
6693
1da177e4
LT
6694/**
6695 * ipr_init_res_table - Initialize the resource table
6696 * @ipr_cmd: ipr command struct
6697 *
6698 * This function looks through the existing resource table, comparing
6699 * it with the config table. This function will take care of old/new
6700 * devices and schedule adding/removing them from the mid-layer
6701 * as appropriate.
6702 *
6703 * Return value:
6704 * IPR_RC_JOB_CONTINUE
6705 **/
6706static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6707{
6708 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6709 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
6710 struct ipr_config_table_entry_wrapper cfgtew;
6711 int entries, found, flag, i;
1da177e4
LT
6712 LIST_HEAD(old_res);
6713
6714 ENTER;
3e7ebdfa
WB
6715 if (ioa_cfg->sis64)
6716 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6717 else
6718 flag = ioa_cfg->u.cfg_table->hdr.flags;
6719
6720 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
6721 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6722
6723 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6724 list_move_tail(&res->queue, &old_res);
6725
3e7ebdfa 6726 if (ioa_cfg->sis64)
438b0331 6727 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
6728 else
6729 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6730
6731 for (i = 0; i < entries; i++) {
6732 if (ioa_cfg->sis64)
6733 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6734 else
6735 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
6736 found = 0;
6737
6738 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 6739 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
6740 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6741 found = 1;
6742 break;
6743 }
6744 }
6745
6746 if (!found) {
6747 if (list_empty(&ioa_cfg->free_res_q)) {
6748 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6749 break;
6750 }
6751
6752 found = 1;
6753 res = list_entry(ioa_cfg->free_res_q.next,
6754 struct ipr_resource_entry, queue);
6755 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 6756 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
6757 res->add_to_ml = 1;
6758 }
6759
6760 if (found)
3e7ebdfa 6761 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
6762 }
6763
6764 list_for_each_entry_safe(res, temp, &old_res, queue) {
6765 if (res->sdev) {
6766 res->del_from_ml = 1;
3e7ebdfa 6767 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 6768 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
6769 }
6770 }
6771
3e7ebdfa
WB
6772 list_for_each_entry_safe(res, temp, &old_res, queue) {
6773 ipr_clear_res_target(res);
6774 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6775 }
6776
ac09c349
BK
6777 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6778 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6779 else
6780 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
6781
6782 LEAVE;
6783 return IPR_RC_JOB_CONTINUE;
6784}
6785
6786/**
6787 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6788 * @ipr_cmd: ipr command struct
6789 *
6790 * This function sends a Query IOA Configuration command
6791 * to the adapter to retrieve the IOA configuration table.
6792 *
6793 * Return value:
6794 * IPR_RC_JOB_RETURN
6795 **/
6796static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6797{
6798 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6799 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 6800 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 6801 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
6802
6803 ENTER;
ac09c349
BK
6804 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6805 ioa_cfg->dual_raid = 1;
1da177e4
LT
6806 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6807 ucode_vpd->major_release, ucode_vpd->card_type,
6808 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6809 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6810 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6811
6812 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 6813 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
6814 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6815 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 6816
3e7ebdfa 6817 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 6818 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6819
6820 ipr_cmd->job_step = ipr_init_res_table;
6821
6822 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6823
6824 LEAVE;
6825 return IPR_RC_JOB_RETURN;
6826}
6827
6828/**
6829 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6830 * @ipr_cmd: ipr command struct
6831 *
6832 * This utility function sends an inquiry to the adapter.
6833 *
6834 * Return value:
6835 * none
6836 **/
6837static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 6838 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
6839{
6840 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6841
6842 ENTER;
6843 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6844 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6845
6846 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6847 ioarcb->cmd_pkt.cdb[1] = flags;
6848 ioarcb->cmd_pkt.cdb[2] = page;
6849 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6850
a32c055f 6851 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6852
6853 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6854 LEAVE;
6855}
6856
62275040
BK
6857/**
6858 * ipr_inquiry_page_supported - Is the given inquiry page supported
6859 * @page0: inquiry page 0 buffer
6860 * @page: page code.
6861 *
6862 * This function determines if the specified inquiry page is supported.
6863 *
6864 * Return value:
6865 * 1 if page is supported / 0 if not
6866 **/
6867static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6868{
6869 int i;
6870
6871 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6872 if (page0->page[i] == page)
6873 return 1;
6874
6875 return 0;
6876}
6877
ac09c349
BK
6878/**
6879 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6880 * @ipr_cmd: ipr command struct
6881 *
6882 * This function sends a Page 0xD0 inquiry to the adapter
6883 * to retrieve adapter capabilities.
6884 *
6885 * Return value:
6886 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6887 **/
6888static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6889{
6890 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6891 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6892 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6893
6894 ENTER;
6895 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6896 memset(cap, 0, sizeof(*cap));
6897
6898 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6899 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6900 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6901 sizeof(struct ipr_inquiry_cap));
6902 return IPR_RC_JOB_RETURN;
6903 }
6904
6905 LEAVE;
6906 return IPR_RC_JOB_CONTINUE;
6907}
6908
1da177e4
LT
6909/**
6910 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6911 * @ipr_cmd: ipr command struct
6912 *
6913 * This function sends a Page 3 inquiry to the adapter
6914 * to retrieve software VPD information.
6915 *
6916 * Return value:
6917 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6918 **/
6919static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
6920{
6921 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040
BK
6922
6923 ENTER;
6924
ac09c349 6925 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040
BK
6926
6927 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6928 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6929 sizeof(struct ipr_inquiry_page3));
6930
6931 LEAVE;
6932 return IPR_RC_JOB_RETURN;
6933}
6934
6935/**
6936 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6937 * @ipr_cmd: ipr command struct
6938 *
6939 * This function sends a Page 0 inquiry to the adapter
6940 * to retrieve supported inquiry pages.
6941 *
6942 * Return value:
6943 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6944 **/
6945static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6946{
6947 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6948 char type[5];
6949
6950 ENTER;
6951
6952 /* Grab the type out of the VPD and store it away */
6953 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6954 type[4] = '\0';
6955 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6956
62275040 6957 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 6958
62275040
BK
6959 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6960 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6961 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
6962
6963 LEAVE;
6964 return IPR_RC_JOB_RETURN;
6965}
6966
6967/**
6968 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6969 * @ipr_cmd: ipr command struct
6970 *
6971 * This function sends a standard inquiry to the adapter.
6972 *
6973 * Return value:
6974 * IPR_RC_JOB_RETURN
6975 **/
6976static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6977{
6978 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6979
6980 ENTER;
62275040 6981 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
6982
6983 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6984 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6985 sizeof(struct ipr_ioa_vpd));
6986
6987 LEAVE;
6988 return IPR_RC_JOB_RETURN;
6989}
6990
6991/**
214777ba 6992 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
6993 * @ipr_cmd: ipr command struct
6994 *
6995 * This function send an Identify Host Request Response Queue
6996 * command to establish the HRRQ with the adapter.
6997 *
6998 * Return value:
6999 * IPR_RC_JOB_RETURN
7000 **/
214777ba 7001static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
7002{
7003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7004 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7005
7006 ENTER;
7007 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7008
7009 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7010 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7011
7012 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
214777ba
WB
7013 if (ioa_cfg->sis64)
7014 ioarcb->cmd_pkt.cdb[1] = 0x1;
1da177e4 7015 ioarcb->cmd_pkt.cdb[2] =
214777ba 7016 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
1da177e4 7017 ioarcb->cmd_pkt.cdb[3] =
214777ba 7018 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
1da177e4 7019 ioarcb->cmd_pkt.cdb[4] =
214777ba 7020 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
1da177e4 7021 ioarcb->cmd_pkt.cdb[5] =
214777ba 7022 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
1da177e4
LT
7023 ioarcb->cmd_pkt.cdb[7] =
7024 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7025 ioarcb->cmd_pkt.cdb[8] =
7026 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7027
214777ba
WB
7028 if (ioa_cfg->sis64) {
7029 ioarcb->cmd_pkt.cdb[10] =
7030 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7031 ioarcb->cmd_pkt.cdb[11] =
7032 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7033 ioarcb->cmd_pkt.cdb[12] =
7034 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7035 ioarcb->cmd_pkt.cdb[13] =
7036 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7037 }
7038
1da177e4
LT
7039 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7040
7041 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7042
7043 LEAVE;
7044 return IPR_RC_JOB_RETURN;
7045}
7046
7047/**
7048 * ipr_reset_timer_done - Adapter reset timer function
7049 * @ipr_cmd: ipr command struct
7050 *
7051 * Description: This function is used in adapter reset processing
7052 * for timing events. If the reset_cmd pointer in the IOA
7053 * config struct is not this adapter's we are doing nested
7054 * resets and fail_all_ops will take care of freeing the
7055 * command block.
7056 *
7057 * Return value:
7058 * none
7059 **/
7060static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7061{
7062 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7063 unsigned long lock_flags = 0;
7064
7065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7066
7067 if (ioa_cfg->reset_cmd == ipr_cmd) {
7068 list_del(&ipr_cmd->queue);
7069 ipr_cmd->done(ipr_cmd);
7070 }
7071
7072 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7073}
7074
7075/**
7076 * ipr_reset_start_timer - Start a timer for adapter reset job
7077 * @ipr_cmd: ipr command struct
7078 * @timeout: timeout value
7079 *
7080 * Description: This function is used in adapter reset processing
7081 * for timing events. If the reset_cmd pointer in the IOA
7082 * config struct is not this adapter's we are doing nested
7083 * resets and fail_all_ops will take care of freeing the
7084 * command block.
7085 *
7086 * Return value:
7087 * none
7088 **/
7089static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7090 unsigned long timeout)
7091{
7092 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7093 ipr_cmd->done = ipr_reset_ioa_job;
7094
7095 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7096 ipr_cmd->timer.expires = jiffies + timeout;
7097 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7098 add_timer(&ipr_cmd->timer);
7099}
7100
7101/**
7102 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7103 * @ioa_cfg: ioa cfg struct
7104 *
7105 * Return value:
7106 * nothing
7107 **/
7108static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7109{
7110 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7111
7112 /* Initialize Host RRQ pointers */
7113 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7114 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7115 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7116 ioa_cfg->toggle_bit = 1;
7117
7118 /* Zero out config table */
3e7ebdfa 7119 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
1da177e4
LT
7120}
7121
214777ba
WB
7122/**
7123 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7124 * @ipr_cmd: ipr command struct
7125 *
7126 * Return value:
7127 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7128 **/
7129static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7130{
7131 unsigned long stage, stage_time;
7132 u32 feedback;
7133 volatile u32 int_reg;
7134 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7135 u64 maskval = 0;
7136
7137 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7138 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7139 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7140
7141 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7142
7143 /* sanity check the stage_time value */
438b0331
WB
7144 if (stage_time == 0)
7145 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7146 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
214777ba
WB
7147 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7148 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7149 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7150
7151 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7152 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7153 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7154 stage_time = ioa_cfg->transop_timeout;
7155 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7156 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7157 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7158 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7159 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7160 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7161 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7162 return IPR_RC_JOB_CONTINUE;
7163 }
7164
7165 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7166 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7167 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7168 ipr_cmd->done = ipr_reset_ioa_job;
7169 add_timer(&ipr_cmd->timer);
7170 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7171
7172 return IPR_RC_JOB_RETURN;
7173}
7174
1da177e4
LT
7175/**
7176 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7177 * @ipr_cmd: ipr command struct
7178 *
7179 * This function reinitializes some control blocks and
7180 * enables destructive diagnostics on the adapter.
7181 *
7182 * Return value:
7183 * IPR_RC_JOB_RETURN
7184 **/
7185static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7186{
7187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7188 volatile u32 int_reg;
7be96900 7189 volatile u64 maskval;
1da177e4
LT
7190
7191 ENTER;
214777ba 7192 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
7193 ipr_init_ioa_mem(ioa_cfg);
7194
7195 ioa_cfg->allow_interrupts = 1;
7be96900 7196 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
7197
7198 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7199 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 7200 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
7201 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7202 return IPR_RC_JOB_CONTINUE;
7203 }
7204
7205 /* Enable destructive diagnostics on IOA */
214777ba
WB
7206 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7207
7be96900
WB
7208 if (ioa_cfg->sis64) {
7209 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7210 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7211 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7212 } else
7213 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 7214
1da177e4
LT
7215 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7216
7217 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7218
214777ba
WB
7219 if (ioa_cfg->sis64) {
7220 ipr_cmd->job_step = ipr_reset_next_stage;
7221 return IPR_RC_JOB_CONTINUE;
7222 }
7223
1da177e4 7224 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 7225 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
1da177e4
LT
7226 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7227 ipr_cmd->done = ipr_reset_ioa_job;
7228 add_timer(&ipr_cmd->timer);
7229 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7230
7231 LEAVE;
7232 return IPR_RC_JOB_RETURN;
7233}
7234
7235/**
7236 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7237 * @ipr_cmd: ipr command struct
7238 *
7239 * This function is invoked when an adapter dump has run out
7240 * of processing time.
7241 *
7242 * Return value:
7243 * IPR_RC_JOB_CONTINUE
7244 **/
7245static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7246{
7247 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7248
7249 if (ioa_cfg->sdt_state == GET_DUMP)
7250 ioa_cfg->sdt_state = ABORT_DUMP;
7251
7252 ipr_cmd->job_step = ipr_reset_alert;
7253
7254 return IPR_RC_JOB_CONTINUE;
7255}
7256
7257/**
7258 * ipr_unit_check_no_data - Log a unit check/no data error log
7259 * @ioa_cfg: ioa config struct
7260 *
7261 * Logs an error indicating the adapter unit checked, but for some
7262 * reason, we were unable to fetch the unit check buffer.
7263 *
7264 * Return value:
7265 * nothing
7266 **/
7267static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7268{
7269 ioa_cfg->errors_logged++;
7270 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7271}
7272
7273/**
7274 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7275 * @ioa_cfg: ioa config struct
7276 *
7277 * Fetches the unit check buffer from the adapter by clocking the data
7278 * through the mailbox register.
7279 *
7280 * Return value:
7281 * nothing
7282 **/
7283static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7284{
7285 unsigned long mailbox;
7286 struct ipr_hostrcb *hostrcb;
7287 struct ipr_uc_sdt sdt;
7288 int rc, length;
65f56475 7289 u32 ioasc;
1da177e4
LT
7290
7291 mailbox = readl(ioa_cfg->ioa_mailbox);
7292
dcbad00e 7293 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
7294 ipr_unit_check_no_data(ioa_cfg);
7295 return;
7296 }
7297
7298 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7299 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7300 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7301
dcbad00e
WB
7302 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7303 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7304 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
7305 ipr_unit_check_no_data(ioa_cfg);
7306 return;
7307 }
7308
7309 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
7310 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7311 length = be32_to_cpu(sdt.entry[0].end_token);
7312 else
7313 length = (be32_to_cpu(sdt.entry[0].end_token) -
7314 be32_to_cpu(sdt.entry[0].start_token)) &
7315 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
7316
7317 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7318 struct ipr_hostrcb, queue);
7319 list_del(&hostrcb->queue);
7320 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7321
7322 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 7323 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
7324 (__be32 *)&hostrcb->hcam,
7325 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7326
65f56475 7327 if (!rc) {
1da177e4 7328 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 7329 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
7330 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7331 ioa_cfg->sdt_state == GET_DUMP)
7332 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7333 } else
1da177e4
LT
7334 ipr_unit_check_no_data(ioa_cfg);
7335
7336 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7337}
7338
7339/**
7340 * ipr_reset_restore_cfg_space - Restore PCI config space.
7341 * @ipr_cmd: ipr command struct
7342 *
7343 * Description: This function restores the saved PCI config space of
7344 * the adapter, fails all outstanding ops back to the callers, and
7345 * fetches the dump/unit check if applicable to this reset.
7346 *
7347 * Return value:
7348 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7349 **/
7350static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7351{
7352 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7353 int rc;
7354
7355 ENTER;
99c965dd 7356 ioa_cfg->pdev->state_saved = true;
1da177e4
LT
7357 rc = pci_restore_state(ioa_cfg->pdev);
7358
7359 if (rc != PCIBIOS_SUCCESSFUL) {
96d21f00 7360 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
7361 return IPR_RC_JOB_CONTINUE;
7362 }
7363
7364 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 7365 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
7366 return IPR_RC_JOB_CONTINUE;
7367 }
7368
7369 ipr_fail_all_ops(ioa_cfg);
7370
7371 if (ioa_cfg->ioa_unit_checked) {
7372 ioa_cfg->ioa_unit_checked = 0;
7373 ipr_get_unit_check_buffer(ioa_cfg);
7374 ipr_cmd->job_step = ipr_reset_alert;
7375 ipr_reset_start_timer(ipr_cmd, 0);
7376 return IPR_RC_JOB_RETURN;
7377 }
7378
7379 if (ioa_cfg->in_ioa_bringdown) {
7380 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7381 } else {
7382 ipr_cmd->job_step = ipr_reset_enable_ioa;
7383
7384 if (GET_DUMP == ioa_cfg->sdt_state) {
7385 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7386 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7387 schedule_work(&ioa_cfg->work_q);
7388 return IPR_RC_JOB_RETURN;
7389 }
7390 }
7391
438b0331 7392 LEAVE;
1da177e4
LT
7393 return IPR_RC_JOB_CONTINUE;
7394}
7395
e619e1a7
BK
7396/**
7397 * ipr_reset_bist_done - BIST has completed on the adapter.
7398 * @ipr_cmd: ipr command struct
7399 *
7400 * Description: Unblock config space and resume the reset process.
7401 *
7402 * Return value:
7403 * IPR_RC_JOB_CONTINUE
7404 **/
7405static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7406{
7407 ENTER;
7408 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7409 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7410 LEAVE;
7411 return IPR_RC_JOB_CONTINUE;
7412}
7413
1da177e4
LT
7414/**
7415 * ipr_reset_start_bist - Run BIST on the adapter.
7416 * @ipr_cmd: ipr command struct
7417 *
7418 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7419 *
7420 * Return value:
7421 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7422 **/
7423static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7424{
7425 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7426 int rc;
7427
7428 ENTER;
b30197d2 7429 pci_block_user_cfg_access(ioa_cfg->pdev);
1da177e4
LT
7430 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7431
7432 if (rc != PCIBIOS_SUCCESSFUL) {
a9aedb09 7433 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
96d21f00 7434 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
1da177e4
LT
7435 rc = IPR_RC_JOB_CONTINUE;
7436 } else {
e619e1a7 7437 ipr_cmd->job_step = ipr_reset_bist_done;
1da177e4
LT
7438 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7439 rc = IPR_RC_JOB_RETURN;
7440 }
7441
7442 LEAVE;
7443 return rc;
7444}
7445
463fc696
BK
7446/**
7447 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7448 * @ipr_cmd: ipr command struct
7449 *
7450 * Description: This clears PCI reset to the adapter and delays two seconds.
7451 *
7452 * Return value:
7453 * IPR_RC_JOB_RETURN
7454 **/
7455static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7456{
7457 ENTER;
7458 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7459 ipr_cmd->job_step = ipr_reset_bist_done;
7460 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7461 LEAVE;
7462 return IPR_RC_JOB_RETURN;
7463}
7464
7465/**
7466 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7467 * @ipr_cmd: ipr command struct
7468 *
7469 * Description: This asserts PCI reset to the adapter.
7470 *
7471 * Return value:
7472 * IPR_RC_JOB_RETURN
7473 **/
7474static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7475{
7476 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7477 struct pci_dev *pdev = ioa_cfg->pdev;
7478
7479 ENTER;
7480 pci_block_user_cfg_access(pdev);
7481 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7482 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7483 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7484 LEAVE;
7485 return IPR_RC_JOB_RETURN;
7486}
7487
1da177e4
LT
7488/**
7489 * ipr_reset_allowed - Query whether or not IOA can be reset
7490 * @ioa_cfg: ioa config struct
7491 *
7492 * Return value:
7493 * 0 if reset not allowed / non-zero if reset is allowed
7494 **/
7495static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7496{
7497 volatile u32 temp_reg;
7498
7499 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7500 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7501}
7502
7503/**
7504 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7505 * @ipr_cmd: ipr command struct
7506 *
7507 * Description: This function waits for adapter permission to run BIST,
7508 * then runs BIST. If the adapter does not give permission after a
7509 * reasonable time, we will reset the adapter anyway. The impact of
7510 * resetting the adapter without warning the adapter is the risk of
7511 * losing the persistent error log on the adapter. If the adapter is
7512 * reset while it is writing to the flash on the adapter, the flash
7513 * segment will have bad ECC and be zeroed.
7514 *
7515 * Return value:
7516 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7517 **/
7518static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7519{
7520 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7521 int rc = IPR_RC_JOB_RETURN;
7522
7523 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7524 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7525 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7526 } else {
463fc696 7527 ipr_cmd->job_step = ioa_cfg->reset;
1da177e4
LT
7528 rc = IPR_RC_JOB_CONTINUE;
7529 }
7530
7531 return rc;
7532}
7533
7534/**
7535 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7536 * @ipr_cmd: ipr command struct
7537 *
7538 * Description: This function alerts the adapter that it will be reset.
7539 * If memory space is not currently enabled, proceed directly
7540 * to running BIST on the adapter. The timer must always be started
7541 * so we guarantee we do not run BIST from ipr_isr.
7542 *
7543 * Return value:
7544 * IPR_RC_JOB_RETURN
7545 **/
7546static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7547{
7548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7549 u16 cmd_reg;
7550 int rc;
7551
7552 ENTER;
7553 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7554
7555 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7556 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 7557 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
7558 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7559 } else {
463fc696 7560 ipr_cmd->job_step = ioa_cfg->reset;
1da177e4
LT
7561 }
7562
7563 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7564 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7565
7566 LEAVE;
7567 return IPR_RC_JOB_RETURN;
7568}
7569
7570/**
7571 * ipr_reset_ucode_download_done - Microcode download completion
7572 * @ipr_cmd: ipr command struct
7573 *
7574 * Description: This function unmaps the microcode download buffer.
7575 *
7576 * Return value:
7577 * IPR_RC_JOB_CONTINUE
7578 **/
7579static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7580{
7581 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7582 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7583
7584 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7585 sglist->num_sg, DMA_TO_DEVICE);
7586
7587 ipr_cmd->job_step = ipr_reset_alert;
7588 return IPR_RC_JOB_CONTINUE;
7589}
7590
7591/**
7592 * ipr_reset_ucode_download - Download microcode to the adapter
7593 * @ipr_cmd: ipr command struct
7594 *
7595 * Description: This function checks to see if it there is microcode
7596 * to download to the adapter. If there is, a download is performed.
7597 *
7598 * Return value:
7599 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7600 **/
7601static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7602{
7603 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7604 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7605
7606 ENTER;
7607 ipr_cmd->job_step = ipr_reset_alert;
7608
7609 if (!sglist)
7610 return IPR_RC_JOB_CONTINUE;
7611
7612 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7613 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7614 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7615 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7616 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7617 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7618 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7619
a32c055f
WB
7620 if (ioa_cfg->sis64)
7621 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7622 else
7623 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
7624 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7625
7626 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7627 IPR_WRITE_BUFFER_TIMEOUT);
7628
7629 LEAVE;
7630 return IPR_RC_JOB_RETURN;
7631}
7632
7633/**
7634 * ipr_reset_shutdown_ioa - Shutdown the adapter
7635 * @ipr_cmd: ipr command struct
7636 *
7637 * Description: This function issues an adapter shutdown of the
7638 * specified type to the specified adapter as part of the
7639 * adapter reset job.
7640 *
7641 * Return value:
7642 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7643 **/
7644static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7645{
7646 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7647 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7648 unsigned long timeout;
7649 int rc = IPR_RC_JOB_CONTINUE;
7650
7651 ENTER;
7652 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7653 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7654 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7655 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7656 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7657
ac09c349
BK
7658 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7659 timeout = IPR_SHUTDOWN_TIMEOUT;
1da177e4
LT
7660 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7661 timeout = IPR_INTERNAL_TIMEOUT;
ac09c349
BK
7662 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7663 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 7664 else
ac09c349 7665 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
1da177e4
LT
7666
7667 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7668
7669 rc = IPR_RC_JOB_RETURN;
7670 ipr_cmd->job_step = ipr_reset_ucode_download;
7671 } else
7672 ipr_cmd->job_step = ipr_reset_alert;
7673
7674 LEAVE;
7675 return rc;
7676}
7677
7678/**
7679 * ipr_reset_ioa_job - Adapter reset job
7680 * @ipr_cmd: ipr command struct
7681 *
7682 * Description: This function is the job router for the adapter reset job.
7683 *
7684 * Return value:
7685 * none
7686 **/
7687static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7688{
7689 u32 rc, ioasc;
1da177e4
LT
7690 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7691
7692 do {
96d21f00 7693 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
7694
7695 if (ioa_cfg->reset_cmd != ipr_cmd) {
7696 /*
7697 * We are doing nested adapter resets and this is
7698 * not the current reset job.
7699 */
7700 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7701 return;
7702 }
7703
7704 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
7705 rc = ipr_cmd->job_step_failed(ipr_cmd);
7706 if (rc == IPR_RC_JOB_RETURN)
7707 return;
1da177e4
LT
7708 }
7709
7710 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 7711 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4
LT
7712 rc = ipr_cmd->job_step(ipr_cmd);
7713 } while(rc == IPR_RC_JOB_CONTINUE);
7714}
7715
7716/**
7717 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7718 * @ioa_cfg: ioa config struct
7719 * @job_step: first job step of reset job
7720 * @shutdown_type: shutdown type
7721 *
7722 * Description: This function will initiate the reset of the given adapter
7723 * starting at the selected job step.
7724 * If the caller needs to wait on the completion of the reset,
7725 * the caller must sleep on the reset_wait_q.
7726 *
7727 * Return value:
7728 * none
7729 **/
7730static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7731 int (*job_step) (struct ipr_cmnd *),
7732 enum ipr_shutdown_type shutdown_type)
7733{
7734 struct ipr_cmnd *ipr_cmd;
7735
7736 ioa_cfg->in_reset_reload = 1;
7737 ioa_cfg->allow_cmds = 0;
7738 scsi_block_requests(ioa_cfg->host);
7739
7740 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7741 ioa_cfg->reset_cmd = ipr_cmd;
7742 ipr_cmd->job_step = job_step;
7743 ipr_cmd->u.shutdown_type = shutdown_type;
7744
7745 ipr_reset_ioa_job(ipr_cmd);
7746}
7747
7748/**
7749 * ipr_initiate_ioa_reset - Initiate an adapter reset
7750 * @ioa_cfg: ioa config struct
7751 * @shutdown_type: shutdown type
7752 *
7753 * Description: This function will initiate the reset of the given adapter.
7754 * If the caller needs to wait on the completion of the reset,
7755 * the caller must sleep on the reset_wait_q.
7756 *
7757 * Return value:
7758 * none
7759 **/
7760static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7761 enum ipr_shutdown_type shutdown_type)
7762{
7763 if (ioa_cfg->ioa_is_dead)
7764 return;
7765
7766 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7767 ioa_cfg->sdt_state = ABORT_DUMP;
7768
7769 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7770 dev_err(&ioa_cfg->pdev->dev,
7771 "IOA taken offline - error recovery failed\n");
7772
7773 ioa_cfg->reset_retries = 0;
7774 ioa_cfg->ioa_is_dead = 1;
7775
7776 if (ioa_cfg->in_ioa_bringdown) {
7777 ioa_cfg->reset_cmd = NULL;
7778 ioa_cfg->in_reset_reload = 0;
7779 ipr_fail_all_ops(ioa_cfg);
7780 wake_up_all(&ioa_cfg->reset_wait_q);
7781
7782 spin_unlock_irq(ioa_cfg->host->host_lock);
7783 scsi_unblock_requests(ioa_cfg->host);
7784 spin_lock_irq(ioa_cfg->host->host_lock);
7785 return;
7786 } else {
7787 ioa_cfg->in_ioa_bringdown = 1;
7788 shutdown_type = IPR_SHUTDOWN_NONE;
7789 }
7790 }
7791
7792 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7793 shutdown_type);
7794}
7795
f8a88b19
LV
7796/**
7797 * ipr_reset_freeze - Hold off all I/O activity
7798 * @ipr_cmd: ipr command struct
7799 *
7800 * Description: If the PCI slot is frozen, hold off all I/O
7801 * activity; then, as soon as the slot is available again,
7802 * initiate an adapter reset.
7803 */
7804static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7805{
7806 /* Disallow new interrupts, avoid loop */
7807 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7808 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7809 ipr_cmd->done = ipr_reset_ioa_job;
7810 return IPR_RC_JOB_RETURN;
7811}
7812
7813/**
7814 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7815 * @pdev: PCI device struct
7816 *
7817 * Description: This routine is called to tell us that the PCI bus
7818 * is down. Can't do anything here, except put the device driver
7819 * into a holding pattern, waiting for the PCI bus to come back.
7820 */
7821static void ipr_pci_frozen(struct pci_dev *pdev)
7822{
7823 unsigned long flags = 0;
7824 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7825
7826 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7827 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7829}
7830
7831/**
7832 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7833 * @pdev: PCI device struct
7834 *
7835 * Description: This routine is called by the pci error recovery
7836 * code after the PCI slot has been reset, just before we
7837 * should resume normal operations.
7838 */
7839static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7840{
7841 unsigned long flags = 0;
7842 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7843
7844 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
463fc696
BK
7845 if (ioa_cfg->needs_warm_reset)
7846 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7847 else
7848 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7849 IPR_SHUTDOWN_NONE);
f8a88b19
LV
7850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7851 return PCI_ERS_RESULT_RECOVERED;
7852}
7853
7854/**
7855 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7856 * @pdev: PCI device struct
7857 *
7858 * Description: This routine is called when the PCI bus has
7859 * permanently failed.
7860 */
7861static void ipr_pci_perm_failure(struct pci_dev *pdev)
7862{
7863 unsigned long flags = 0;
7864 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7865
7866 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7867 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7868 ioa_cfg->sdt_state = ABORT_DUMP;
7869 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7870 ioa_cfg->in_ioa_bringdown = 1;
6ff63896 7871 ioa_cfg->allow_cmds = 0;
f8a88b19
LV
7872 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7873 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7874}
7875
7876/**
7877 * ipr_pci_error_detected - Called when a PCI error is detected.
7878 * @pdev: PCI device struct
7879 * @state: PCI channel state
7880 *
7881 * Description: Called when a PCI error is detected.
7882 *
7883 * Return value:
7884 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7885 */
7886static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7887 pci_channel_state_t state)
7888{
7889 switch (state) {
7890 case pci_channel_io_frozen:
7891 ipr_pci_frozen(pdev);
7892 return PCI_ERS_RESULT_NEED_RESET;
7893 case pci_channel_io_perm_failure:
7894 ipr_pci_perm_failure(pdev);
7895 return PCI_ERS_RESULT_DISCONNECT;
7896 break;
7897 default:
7898 break;
7899 }
7900 return PCI_ERS_RESULT_NEED_RESET;
7901}
7902
1da177e4
LT
7903/**
7904 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7905 * @ioa_cfg: ioa cfg struct
7906 *
7907 * Description: This is the second phase of adapter intialization
7908 * This function takes care of initilizing the adapter to the point
7909 * where it can accept new commands.
7910
7911 * Return value:
b1c11812 7912 * 0 on success / -EIO on failure
1da177e4
LT
7913 **/
7914static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7915{
7916 int rc = 0;
7917 unsigned long host_lock_flags = 0;
7918
7919 ENTER;
7920 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7921 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce
BK
7922 if (ioa_cfg->needs_hard_reset) {
7923 ioa_cfg->needs_hard_reset = 0;
7924 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7925 } else
7926 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7927 IPR_SHUTDOWN_NONE);
1da177e4
LT
7928
7929 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7930 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7931 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7932
7933 if (ioa_cfg->ioa_is_dead) {
7934 rc = -EIO;
7935 } else if (ipr_invalid_adapter(ioa_cfg)) {
7936 if (!ipr_testmode)
7937 rc = -EIO;
7938
7939 dev_err(&ioa_cfg->pdev->dev,
7940 "Adapter not supported in this hardware configuration.\n");
7941 }
7942
7943 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7944
7945 LEAVE;
7946 return rc;
7947}
7948
7949/**
7950 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7951 * @ioa_cfg: ioa config struct
7952 *
7953 * Return value:
7954 * none
7955 **/
7956static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7957{
7958 int i;
7959
7960 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7961 if (ioa_cfg->ipr_cmnd_list[i])
7962 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7963 ioa_cfg->ipr_cmnd_list[i],
7964 ioa_cfg->ipr_cmnd_list_dma[i]);
7965
7966 ioa_cfg->ipr_cmnd_list[i] = NULL;
7967 }
7968
7969 if (ioa_cfg->ipr_cmd_pool)
7970 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7971
7972 ioa_cfg->ipr_cmd_pool = NULL;
7973}
7974
7975/**
7976 * ipr_free_mem - Frees memory allocated for an adapter
7977 * @ioa_cfg: ioa cfg struct
7978 *
7979 * Return value:
7980 * nothing
7981 **/
7982static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7983{
7984 int i;
7985
7986 kfree(ioa_cfg->res_entries);
7987 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7988 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7989 ipr_free_cmd_blks(ioa_cfg);
7990 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7991 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
3e7ebdfa
WB
7992 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7993 ioa_cfg->u.cfg_table,
1da177e4
LT
7994 ioa_cfg->cfg_table_dma);
7995
7996 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7997 pci_free_consistent(ioa_cfg->pdev,
7998 sizeof(struct ipr_hostrcb),
7999 ioa_cfg->hostrcb[i],
8000 ioa_cfg->hostrcb_dma[i]);
8001 }
8002
8003 ipr_free_dump(ioa_cfg);
1da177e4
LT
8004 kfree(ioa_cfg->trace);
8005}
8006
8007/**
8008 * ipr_free_all_resources - Free all allocated resources for an adapter.
8009 * @ipr_cmd: ipr command struct
8010 *
8011 * This function frees all allocated resources for the
8012 * specified adapter.
8013 *
8014 * Return value:
8015 * none
8016 **/
8017static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8018{
8019 struct pci_dev *pdev = ioa_cfg->pdev;
8020
8021 ENTER;
8022 free_irq(pdev->irq, ioa_cfg);
5a9ef25b 8023 pci_disable_msi(pdev);
1da177e4
LT
8024 iounmap(ioa_cfg->hdw_dma_regs);
8025 pci_release_regions(pdev);
8026 ipr_free_mem(ioa_cfg);
8027 scsi_host_put(ioa_cfg->host);
8028 pci_disable_device(pdev);
8029 LEAVE;
8030}
8031
8032/**
8033 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8034 * @ioa_cfg: ioa config struct
8035 *
8036 * Return value:
8037 * 0 on success / -ENOMEM on allocation failure
8038 **/
8039static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8040{
8041 struct ipr_cmnd *ipr_cmd;
8042 struct ipr_ioarcb *ioarcb;
8043 dma_addr_t dma_addr;
8044 int i;
8045
8046 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
a32c055f 8047 sizeof(struct ipr_cmnd), 16, 0);
1da177e4
LT
8048
8049 if (!ioa_cfg->ipr_cmd_pool)
8050 return -ENOMEM;
8051
8052 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
e94b1766 8053 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
8054
8055 if (!ipr_cmd) {
8056 ipr_free_cmd_blks(ioa_cfg);
8057 return -ENOMEM;
8058 }
8059
8060 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8061 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8062 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8063
8064 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
8065 ipr_cmd->dma_addr = dma_addr;
8066 if (ioa_cfg->sis64)
8067 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8068 else
8069 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8070
1da177e4 8071 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
8072 if (ioa_cfg->sis64) {
8073 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8074 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8075 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 8076 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
8077 } else {
8078 ioarcb->write_ioadl_addr =
8079 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8080 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8081 ioarcb->ioasa_host_pci_addr =
96d21f00 8082 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 8083 }
1da177e4
LT
8084 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8085 ipr_cmd->cmd_index = i;
8086 ipr_cmd->ioa_cfg = ioa_cfg;
8087 ipr_cmd->sense_buffer_dma = dma_addr +
8088 offsetof(struct ipr_cmnd, sense_buffer);
8089
8090 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8091 }
8092
8093 return 0;
8094}
8095
8096/**
8097 * ipr_alloc_mem - Allocate memory for an adapter
8098 * @ioa_cfg: ioa config struct
8099 *
8100 * Return value:
8101 * 0 on success / non-zero for error
8102 **/
8103static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8104{
8105 struct pci_dev *pdev = ioa_cfg->pdev;
8106 int i, rc = -ENOMEM;
8107
8108 ENTER;
0bc42e35 8109 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
3e7ebdfa 8110 ioa_cfg->max_devs_supported, GFP_KERNEL);
1da177e4
LT
8111
8112 if (!ioa_cfg->res_entries)
8113 goto out;
8114
3e7ebdfa
WB
8115 if (ioa_cfg->sis64) {
8116 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8117 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8118 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8119 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8120 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8121 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8122 }
8123
8124 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 8125 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
8126 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8127 }
1da177e4
LT
8128
8129 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8130 sizeof(struct ipr_misc_cbs),
8131 &ioa_cfg->vpd_cbs_dma);
8132
8133 if (!ioa_cfg->vpd_cbs)
8134 goto out_free_res_entries;
8135
8136 if (ipr_alloc_cmd_blks(ioa_cfg))
8137 goto out_free_vpd_cbs;
8138
8139 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8140 sizeof(u32) * IPR_NUM_CMD_BLKS,
8141 &ioa_cfg->host_rrq_dma);
8142
8143 if (!ioa_cfg->host_rrq)
8144 goto out_ipr_free_cmd_blocks;
8145
3e7ebdfa
WB
8146 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8147 ioa_cfg->cfg_table_size,
8148 &ioa_cfg->cfg_table_dma);
1da177e4 8149
3e7ebdfa 8150 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
8151 goto out_free_host_rrq;
8152
8153 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8154 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8155 sizeof(struct ipr_hostrcb),
8156 &ioa_cfg->hostrcb_dma[i]);
8157
8158 if (!ioa_cfg->hostrcb[i])
8159 goto out_free_hostrcb_dma;
8160
8161 ioa_cfg->hostrcb[i]->hostrcb_dma =
8162 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 8163 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
8164 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8165 }
8166
0bc42e35 8167 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
8168 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8169
8170 if (!ioa_cfg->trace)
8171 goto out_free_hostrcb_dma;
8172
1da177e4
LT
8173 rc = 0;
8174out:
8175 LEAVE;
8176 return rc;
8177
8178out_free_hostrcb_dma:
8179 while (i-- > 0) {
8180 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8181 ioa_cfg->hostrcb[i],
8182 ioa_cfg->hostrcb_dma[i]);
8183 }
3e7ebdfa
WB
8184 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8185 ioa_cfg->u.cfg_table,
8186 ioa_cfg->cfg_table_dma);
1da177e4
LT
8187out_free_host_rrq:
8188 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8189 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8190out_ipr_free_cmd_blocks:
8191 ipr_free_cmd_blks(ioa_cfg);
8192out_free_vpd_cbs:
8193 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8194 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8195out_free_res_entries:
8196 kfree(ioa_cfg->res_entries);
8197 goto out;
8198}
8199
8200/**
8201 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8202 * @ioa_cfg: ioa config struct
8203 *
8204 * Return value:
8205 * none
8206 **/
8207static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8208{
8209 int i;
8210
8211 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8212 ioa_cfg->bus_attr[i].bus = i;
8213 ioa_cfg->bus_attr[i].qas_enabled = 0;
8214 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8215 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8216 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8217 else
8218 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8219 }
8220}
8221
8222/**
8223 * ipr_init_ioa_cfg - Initialize IOA config struct
8224 * @ioa_cfg: ioa config struct
8225 * @host: scsi host struct
8226 * @pdev: PCI dev struct
8227 *
8228 * Return value:
8229 * none
8230 **/
8231static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8232 struct Scsi_Host *host, struct pci_dev *pdev)
8233{
8234 const struct ipr_interrupt_offsets *p;
8235 struct ipr_interrupts *t;
8236 void __iomem *base;
8237
8238 ioa_cfg->host = host;
8239 ioa_cfg->pdev = pdev;
8240 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 8241 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
8242 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8243 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8244 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8245 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8246 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8247 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8248 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8249 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8250
8251 INIT_LIST_HEAD(&ioa_cfg->free_q);
8252 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8253 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8254 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8255 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8256 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 8257 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 8258 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 8259 init_waitqueue_head(&ioa_cfg->msi_wait_q);
1da177e4
LT
8260 ioa_cfg->sdt_state = INACTIVE;
8261
8262 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 8263 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 8264
3e7ebdfa
WB
8265 if (ioa_cfg->sis64) {
8266 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8267 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8268 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8269 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8270 } else {
8271 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8272 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8273 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8274 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8275 }
1da177e4
LT
8276 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8277 host->unique_id = host->host_no;
8278 host->max_cmd_len = IPR_MAX_CDB_LEN;
8279 pci_set_drvdata(pdev, ioa_cfg);
8280
8281 p = &ioa_cfg->chip_cfg->regs;
8282 t = &ioa_cfg->regs;
8283 base = ioa_cfg->hdw_dma_regs;
8284
8285 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8286 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
214777ba 8287 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
1da177e4 8288 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
214777ba 8289 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
1da177e4 8290 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
214777ba 8291 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
1da177e4 8292 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
214777ba 8293 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
1da177e4
LT
8294 t->ioarrin_reg = base + p->ioarrin_reg;
8295 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
214777ba 8296 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
1da177e4 8297 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
214777ba 8298 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
1da177e4 8299 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
214777ba 8300 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
dcbad00e
WB
8301
8302 if (ioa_cfg->sis64) {
214777ba 8303 t->init_feedback_reg = base + p->init_feedback_reg;
dcbad00e
WB
8304 t->dump_addr_reg = base + p->dump_addr_reg;
8305 t->dump_data_reg = base + p->dump_data_reg;
8306 }
1da177e4
LT
8307}
8308
8309/**
1be7bd82 8310 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
8311 * @dev_id: PCI device id struct
8312 *
8313 * Return value:
1be7bd82 8314 * ptr to chip information on success / NULL on failure
1da177e4 8315 **/
1be7bd82
WB
8316static const struct ipr_chip_t * __devinit
8317ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
8318{
8319 int i;
8320
1da177e4
LT
8321 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8322 if (ipr_chip[i].vendor == dev_id->vendor &&
8323 ipr_chip[i].device == dev_id->device)
1be7bd82 8324 return &ipr_chip[i];
1da177e4
LT
8325 return NULL;
8326}
8327
95fecd90
WB
8328/**
8329 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8330 * @pdev: PCI device struct
8331 *
8332 * Description: Simply set the msi_received flag to 1 indicating that
8333 * Message Signaled Interrupts are supported.
8334 *
8335 * Return value:
8336 * 0 on success / non-zero on failure
8337 **/
8338static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8339{
8340 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8341 unsigned long lock_flags = 0;
8342 irqreturn_t rc = IRQ_HANDLED;
8343
8344 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8345
8346 ioa_cfg->msi_received = 1;
8347 wake_up(&ioa_cfg->msi_wait_q);
8348
8349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8350 return rc;
8351}
8352
8353/**
8354 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8355 * @pdev: PCI device struct
8356 *
8357 * Description: The return value from pci_enable_msi() can not always be
8358 * trusted. This routine sets up and initiates a test interrupt to determine
8359 * if the interrupt is received via the ipr_test_intr() service routine.
8360 * If the tests fails, the driver will fall back to LSI.
8361 *
8362 * Return value:
8363 * 0 on success / non-zero on failure
8364 **/
8365static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8366 struct pci_dev *pdev)
8367{
8368 int rc;
8369 volatile u32 int_reg;
8370 unsigned long lock_flags = 0;
8371
8372 ENTER;
8373
8374 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8375 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8376 ioa_cfg->msi_received = 0;
8377 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 8378 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
8379 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8381
8382 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8383 if (rc) {
8384 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8385 return rc;
8386 } else if (ipr_debug)
8387 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8388
214777ba 8389 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
8390 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8391 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8392 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8393
8394 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8395 if (!ioa_cfg->msi_received) {
8396 /* MSI test failed */
8397 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8398 rc = -EOPNOTSUPP;
8399 } else if (ipr_debug)
8400 dev_info(&pdev->dev, "MSI test succeeded.\n");
8401
8402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8403
8404 free_irq(pdev->irq, ioa_cfg);
8405
8406 LEAVE;
8407
8408 return rc;
8409}
8410
1da177e4
LT
8411/**
8412 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8413 * @pdev: PCI device struct
8414 * @dev_id: PCI device id struct
8415 *
8416 * Return value:
8417 * 0 on success / non-zero on failure
8418 **/
8419static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8420 const struct pci_device_id *dev_id)
8421{
8422 struct ipr_ioa_cfg *ioa_cfg;
8423 struct Scsi_Host *host;
8424 unsigned long ipr_regs_pci;
8425 void __iomem *ipr_regs;
a2a65a3e 8426 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 8427 volatile u32 mask, uproc, interrupts;
1da177e4
LT
8428
8429 ENTER;
8430
8431 if ((rc = pci_enable_device(pdev))) {
8432 dev_err(&pdev->dev, "Cannot enable adapter\n");
8433 goto out;
8434 }
8435
8436 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8437
8438 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8439
8440 if (!host) {
8441 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8442 rc = -ENOMEM;
8443 goto out_disable;
8444 }
8445
8446 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8447 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
35a39691
BK
8448 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8449 sata_port_info.flags, &ipr_sata_ops);
1da177e4 8450
1be7bd82 8451 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 8452
1be7bd82 8453 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
8454 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8455 dev_id->vendor, dev_id->device);
8456 goto out_scsi_host_put;
8457 }
8458
a32c055f
WB
8459 /* set SIS 32 or SIS 64 */
8460 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82
WB
8461 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8462
5469cb5b
BK
8463 if (ipr_transop_timeout)
8464 ioa_cfg->transop_timeout = ipr_transop_timeout;
8465 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8466 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8467 else
8468 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8469
44c10138 8470 ioa_cfg->revid = pdev->revision;
463fc696 8471
1da177e4
LT
8472 ipr_regs_pci = pci_resource_start(pdev, 0);
8473
8474 rc = pci_request_regions(pdev, IPR_NAME);
8475 if (rc < 0) {
8476 dev_err(&pdev->dev,
8477 "Couldn't register memory range of registers\n");
8478 goto out_scsi_host_put;
8479 }
8480
25729a7f 8481 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
8482
8483 if (!ipr_regs) {
8484 dev_err(&pdev->dev,
8485 "Couldn't map memory range of registers\n");
8486 rc = -ENOMEM;
8487 goto out_release_regions;
8488 }
8489
8490 ioa_cfg->hdw_dma_regs = ipr_regs;
8491 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8492 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8493
8494 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8495
8496 pci_set_master(pdev);
8497
a32c055f
WB
8498 if (ioa_cfg->sis64) {
8499 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8500 if (rc < 0) {
8501 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8502 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8503 }
8504
8505 } else
8506 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8507
1da177e4
LT
8508 if (rc < 0) {
8509 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8510 goto cleanup_nomem;
8511 }
8512
8513 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8514 ioa_cfg->chip_cfg->cache_line_size);
8515
8516 if (rc != PCIBIOS_SUCCESSFUL) {
8517 dev_err(&pdev->dev, "Write of cache line size failed\n");
8518 rc = -EIO;
8519 goto cleanup_nomem;
8520 }
8521
95fecd90 8522 /* Enable MSI style interrupts if they are supported. */
1be7bd82 8523 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
95fecd90
WB
8524 rc = ipr_test_msi(ioa_cfg, pdev);
8525 if (rc == -EOPNOTSUPP)
8526 pci_disable_msi(pdev);
8527 else if (rc)
8528 goto out_msi_disable;
8529 else
8530 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8531 } else if (ipr_debug)
8532 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8533
1da177e4
LT
8534 /* Save away PCI config space for use following IOA reset */
8535 rc = pci_save_state(pdev);
8536
8537 if (rc != PCIBIOS_SUCCESSFUL) {
8538 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8539 rc = -EIO;
8540 goto cleanup_nomem;
8541 }
8542
8543 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8544 goto cleanup_nomem;
8545
8546 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8547 goto cleanup_nomem;
8548
3e7ebdfa
WB
8549 if (ioa_cfg->sis64)
8550 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8551 + ((sizeof(struct ipr_config_table_entry64)
8552 * ioa_cfg->max_devs_supported)));
8553 else
8554 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8555 + ((sizeof(struct ipr_config_table_entry)
8556 * ioa_cfg->max_devs_supported)));
8557
1da177e4
LT
8558 rc = ipr_alloc_mem(ioa_cfg);
8559 if (rc < 0) {
8560 dev_err(&pdev->dev,
8561 "Couldn't allocate enough memory for device driver!\n");
8562 goto cleanup_nomem;
8563 }
8564
ce155cce
BK
8565 /*
8566 * If HRRQ updated interrupt is not masked, or reset alert is set,
8567 * the card is in an unknown state and needs a hard reset
8568 */
214777ba
WB
8569 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8570 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8571 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce
BK
8572 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8573 ioa_cfg->needs_hard_reset = 1;
473b1e8e
BK
8574 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8575 ioa_cfg->needs_hard_reset = 1;
8576 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8577 ioa_cfg->ioa_unit_checked = 1;
ce155cce 8578
1da177e4 8579 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
95fecd90
WB
8580 rc = request_irq(pdev->irq, ipr_isr,
8581 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8582 IPR_NAME, ioa_cfg);
1da177e4
LT
8583
8584 if (rc) {
8585 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8586 pdev->irq, rc);
8587 goto cleanup_nolog;
8588 }
8589
463fc696
BK
8590 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8591 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8592 ioa_cfg->needs_warm_reset = 1;
8593 ioa_cfg->reset = ipr_reset_slot_reset;
8594 } else
8595 ioa_cfg->reset = ipr_reset_start_bist;
8596
1da177e4
LT
8597 spin_lock(&ipr_driver_lock);
8598 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8599 spin_unlock(&ipr_driver_lock);
8600
8601 LEAVE;
8602out:
8603 return rc;
8604
8605cleanup_nolog:
8606 ipr_free_mem(ioa_cfg);
8607cleanup_nomem:
8608 iounmap(ipr_regs);
95fecd90
WB
8609out_msi_disable:
8610 pci_disable_msi(pdev);
1da177e4
LT
8611out_release_regions:
8612 pci_release_regions(pdev);
8613out_scsi_host_put:
8614 scsi_host_put(host);
8615out_disable:
8616 pci_disable_device(pdev);
8617 goto out;
8618}
8619
8620/**
8621 * ipr_scan_vsets - Scans for VSET devices
8622 * @ioa_cfg: ioa config struct
8623 *
8624 * Description: Since the VSET resources do not follow SAM in that we can have
8625 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8626 *
8627 * Return value:
8628 * none
8629 **/
8630static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8631{
8632 int target, lun;
8633
8634 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8635 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8636 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8637}
8638
8639/**
8640 * ipr_initiate_ioa_bringdown - Bring down an adapter
8641 * @ioa_cfg: ioa config struct
8642 * @shutdown_type: shutdown type
8643 *
8644 * Description: This function will initiate bringing down the adapter.
8645 * This consists of issuing an IOA shutdown to the adapter
8646 * to flush the cache, and running BIST.
8647 * If the caller needs to wait on the completion of the reset,
8648 * the caller must sleep on the reset_wait_q.
8649 *
8650 * Return value:
8651 * none
8652 **/
8653static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8654 enum ipr_shutdown_type shutdown_type)
8655{
8656 ENTER;
8657 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8658 ioa_cfg->sdt_state = ABORT_DUMP;
8659 ioa_cfg->reset_retries = 0;
8660 ioa_cfg->in_ioa_bringdown = 1;
8661 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8662 LEAVE;
8663}
8664
8665/**
8666 * __ipr_remove - Remove a single adapter
8667 * @pdev: pci device struct
8668 *
8669 * Adapter hot plug remove entry point.
8670 *
8671 * Return value:
8672 * none
8673 **/
8674static void __ipr_remove(struct pci_dev *pdev)
8675{
8676 unsigned long host_lock_flags = 0;
8677 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8678 ENTER;
8679
8680 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
970ea294
BK
8681 while(ioa_cfg->in_reset_reload) {
8682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8683 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8684 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8685 }
8686
1da177e4
LT
8687 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8688
8689 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8690 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 8691 flush_scheduled_work();
1da177e4
LT
8692 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8693
8694 spin_lock(&ipr_driver_lock);
8695 list_del(&ioa_cfg->queue);
8696 spin_unlock(&ipr_driver_lock);
8697
8698 if (ioa_cfg->sdt_state == ABORT_DUMP)
8699 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8700 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8701
8702 ipr_free_all_resources(ioa_cfg);
8703
8704 LEAVE;
8705}
8706
8707/**
8708 * ipr_remove - IOA hot plug remove entry point
8709 * @pdev: pci device struct
8710 *
8711 * Adapter hot plug remove entry point.
8712 *
8713 * Return value:
8714 * none
8715 **/
f381642d 8716static void __devexit ipr_remove(struct pci_dev *pdev)
1da177e4
LT
8717{
8718 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8719
8720 ENTER;
8721
ee959b00 8722 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 8723 &ipr_trace_attr);
ee959b00 8724 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8725 &ipr_dump_attr);
8726 scsi_remove_host(ioa_cfg->host);
8727
8728 __ipr_remove(pdev);
8729
8730 LEAVE;
8731}
8732
8733/**
8734 * ipr_probe - Adapter hot plug add entry point
8735 *
8736 * Return value:
8737 * 0 on success / non-zero on failure
8738 **/
8739static int __devinit ipr_probe(struct pci_dev *pdev,
8740 const struct pci_device_id *dev_id)
8741{
8742 struct ipr_ioa_cfg *ioa_cfg;
8743 int rc;
8744
8745 rc = ipr_probe_ioa(pdev, dev_id);
8746
8747 if (rc)
8748 return rc;
8749
8750 ioa_cfg = pci_get_drvdata(pdev);
8751 rc = ipr_probe_ioa_part2(ioa_cfg);
8752
8753 if (rc) {
8754 __ipr_remove(pdev);
8755 return rc;
8756 }
8757
8758 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8759
8760 if (rc) {
8761 __ipr_remove(pdev);
8762 return rc;
8763 }
8764
ee959b00 8765 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8766 &ipr_trace_attr);
8767
8768 if (rc) {
8769 scsi_remove_host(ioa_cfg->host);
8770 __ipr_remove(pdev);
8771 return rc;
8772 }
8773
ee959b00 8774 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8775 &ipr_dump_attr);
8776
8777 if (rc) {
ee959b00 8778 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
8779 &ipr_trace_attr);
8780 scsi_remove_host(ioa_cfg->host);
8781 __ipr_remove(pdev);
8782 return rc;
8783 }
8784
8785 scsi_scan_host(ioa_cfg->host);
8786 ipr_scan_vsets(ioa_cfg);
8787 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8788 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 8789 ioa_cfg->host->max_channel = IPR_VSET_BUS;
1da177e4
LT
8790 schedule_work(&ioa_cfg->work_q);
8791 return 0;
8792}
8793
8794/**
8795 * ipr_shutdown - Shutdown handler.
d18c3db5 8796 * @pdev: pci device struct
1da177e4
LT
8797 *
8798 * This function is invoked upon system shutdown/reboot. It will issue
8799 * an adapter shutdown to the adapter to flush the write cache.
8800 *
8801 * Return value:
8802 * none
8803 **/
d18c3db5 8804static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 8805{
d18c3db5 8806 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4
LT
8807 unsigned long lock_flags = 0;
8808
8809 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
8810 while(ioa_cfg->in_reset_reload) {
8811 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8812 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8813 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8814 }
8815
1da177e4
LT
8816 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8817 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8818 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8819}
8820
8821static struct pci_device_id ipr_pci_table[] __devinitdata = {
8822 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8823 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 8824 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8825 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 8826 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8827 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 8828 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 8829 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 8830 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 8831 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 8832 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 8833 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 8834 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 8835 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 8836 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
8837 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8838 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 8839 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 8840 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 8841 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
8842 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8843 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 8844 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
8845 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8846 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 8847 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 8848 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 8849 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
8850 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8851 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 8852 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
8853 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8854 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 8855 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
8856 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8857 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
8858 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8859 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
60e7486b 8860 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 8861 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 8862 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 8863 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 8864 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 8865 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 8866 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 8867 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
8868 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8869 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 8870 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
8871 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8872 IPR_USE_LONG_TRANSOP_TIMEOUT },
d7b4627f
WB
8873 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8874 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
8875 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8876 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8877 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8878 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8879 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8880 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8881 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8882 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8883 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8884 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8885 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8886 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8887 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8888 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
1da177e4
LT
8889 { }
8890};
8891MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8892
f8a88b19
LV
8893static struct pci_error_handlers ipr_err_handler = {
8894 .error_detected = ipr_pci_error_detected,
8895 .slot_reset = ipr_pci_slot_reset,
8896};
8897
1da177e4
LT
8898static struct pci_driver ipr_driver = {
8899 .name = IPR_NAME,
8900 .id_table = ipr_pci_table,
8901 .probe = ipr_probe,
f381642d 8902 .remove = __devexit_p(ipr_remove),
d18c3db5 8903 .shutdown = ipr_shutdown,
f8a88b19 8904 .err_handler = &ipr_err_handler,
1da177e4
LT
8905};
8906
f72919ec
WB
8907/**
8908 * ipr_halt_done - Shutdown prepare completion
8909 *
8910 * Return value:
8911 * none
8912 **/
8913static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8914{
8915 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8916
8917 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8918}
8919
8920/**
8921 * ipr_halt - Issue shutdown prepare to all adapters
8922 *
8923 * Return value:
8924 * NOTIFY_OK on success / NOTIFY_DONE on failure
8925 **/
8926static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8927{
8928 struct ipr_cmnd *ipr_cmd;
8929 struct ipr_ioa_cfg *ioa_cfg;
8930 unsigned long flags = 0;
8931
8932 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8933 return NOTIFY_DONE;
8934
8935 spin_lock(&ipr_driver_lock);
8936
8937 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8938 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8939 if (!ioa_cfg->allow_cmds) {
8940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8941 continue;
8942 }
8943
8944 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8945 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8946 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8947 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8948 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8949
8950 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8951 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8952 }
8953 spin_unlock(&ipr_driver_lock);
8954
8955 return NOTIFY_OK;
8956}
8957
8958static struct notifier_block ipr_notifier = {
8959 ipr_halt, NULL, 0
8960};
8961
1da177e4
LT
8962/**
8963 * ipr_init - Module entry point
8964 *
8965 * Return value:
8966 * 0 on success / negative value on failure
8967 **/
8968static int __init ipr_init(void)
8969{
8970 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8971 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8972
f72919ec 8973 register_reboot_notifier(&ipr_notifier);
dcbccbde 8974 return pci_register_driver(&ipr_driver);
1da177e4
LT
8975}
8976
8977/**
8978 * ipr_exit - Module unload
8979 *
8980 * Module unload entry point.
8981 *
8982 * Return value:
8983 * none
8984 **/
8985static void __exit ipr_exit(void)
8986{
f72919ec 8987 unregister_reboot_notifier(&ipr_notifier);
1da177e4
LT
8988 pci_unregister_driver(&ipr_driver);
8989}
8990
8991module_init(ipr_init);
8992module_exit(ipr_exit);