/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
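
/*
 * Usage example (hypothetical values): the module parameters above are
 * given at load time, e.g.:
 *
 *	modprobe ipr max_speed=2 log_level=4 debug=1
 */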

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd: ipr command struct
 * @type: trace type
 * @add_data: additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

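	/* No empty check here: callers rely on a free command block being available */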
	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg: ioa config struct
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg: ioa config struct
 * @clr_ints: interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
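	/* The read back flushes the posted PCI writes above to the adapter */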
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

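	/*
	 * Keep data parity error recovery (DPERR_E) and relaxed ordering
	 * (ERO) set so both are enabled when the register is restored.
	 */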
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg: ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 * @done: done function
 * @timeout_func: timeout function
 * @timeout: timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

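	/*
	 * The barrier ensures the IOARCB is fully written to memory before
	 * the adapter is told where to find it via the IOARRIN register.
	 */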
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd: ipr command struct
 * @timeout_func: function to invoke if command times out
 * @timeout: timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

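	/* Drop the host lock while sleeping; the completion path acquires it */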
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg: ioa config struct
 * @type: HCAM type
 * @hostrcb: hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

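		/* As in ipr_do_req(): publish the IOARCB before ringing IOARRIN */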
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res: resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

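	/* No existing resource matched, so take a free entry for the new device */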
	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i: index into buffer
 * @buf: string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix: string to print at start of printk
 * @hostrcb: hostrcb pointer
 * @vpd: vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
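	/* The + 3 leaves room for the two pad spaces and the terminating NUL */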
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd: vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix: string to print at start of printk
 * @hostrcb: hostrcb pointer
 * @vpd: vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd: vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

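		/* Members 0-9 live in array_member[]; the remainder in array_member2[] */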
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg: ioa config struct
 * @data: IOA error data
 * @len: data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

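	/* Dump four big-endian 32-bit words per line, prefixed with the byte offset */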
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb: hostrcb struct
 * @fabric: fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb: hostrcb struct
 * @cfg: fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

1596 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1597 ipr_log_fabric_path(hostrcb, fabric);
1598 for_each_fabric_cfg(fabric, cfg)
1599 ipr_log_path_elem(hostrcb, cfg);
1600
1601 add_len -= be16_to_cpu(fabric->length);
1602 fabric = (struct ipr_hostrcb_fabric_desc *)
1603 ((unsigned long)fabric + be16_to_cpu(fabric->length));
1604 }
1605
ac719aba 1606 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
1607}
1608
b0df54bb
BK
1609/**
1610 * ipr_log_generic_error - Log an adapter error.
1611 * @ioa_cfg: ioa config struct
1612 * @hostrcb: hostrcb struct
1613 *
1614 * Return value:
1615 * none
1616 **/
1617static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1618 struct ipr_hostrcb *hostrcb)
1619{
ac719aba 1620 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb
BK
1621 be32_to_cpu(hostrcb->hcam.length));
1622}
1623
1da177e4
LT
1624/**
1625 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1626 * @ioasc: IOASC
1627 *
1628 * This function will return the index into the ipr_error_table
1629 * for the specified IOASC. If the IOASC is not in the table,
1630 * 0 will be returned, which points to the entry used for unknown errors.
1631 *
1632 * Return value:
1633 * index into the ipr_error_table
1634 **/
1635static u32 ipr_get_error(u32 ioasc)
1636{
1637 int i;
1638
1639 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 1640 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
1641 return i;
1642
1643 return 0;
1644}
1645
1646/**
1647 * ipr_handle_log_data - Log an adapter error.
1648 * @ioa_cfg: ioa config struct
1649 * @hostrcb: hostrcb struct
1650 *
1651 * This function logs an adapter error to the system.
1652 *
1653 * Return value:
1654 * none
1655 **/
1656static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1657 struct ipr_hostrcb *hostrcb)
1658{
1659 u32 ioasc;
1660 int error_index;
1661
1662 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1663 return;
1664
1665 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1666 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1667
1668 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1669
1670 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1671 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1672 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1673 scsi_report_bus_reset(ioa_cfg->host,
1674 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1675 }
1676
1677 error_index = ipr_get_error(ioasc);
1678
1679 if (!ipr_error_table[error_index].log_hcam)
1680 return;
1681
49dc6a18 1682 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
1683
1684 /* Set indication we have logged an error */
1685 ioa_cfg->errors_logged++;
1686
933916f3 1687 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 1688 return;
cf852037
BK
1689 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1690 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
1691
1692 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
1693 case IPR_HOST_RCB_OVERLAY_ID_2:
1694 ipr_log_cache_error(ioa_cfg, hostrcb);
1695 break;
1696 case IPR_HOST_RCB_OVERLAY_ID_3:
1697 ipr_log_config_error(ioa_cfg, hostrcb);
1698 break;
1699 case IPR_HOST_RCB_OVERLAY_ID_4:
1700 case IPR_HOST_RCB_OVERLAY_ID_6:
1701 ipr_log_array_error(ioa_cfg, hostrcb);
1702 break;
b0df54bb
BK
1703 case IPR_HOST_RCB_OVERLAY_ID_7:
1704 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1705 break;
ee0f05b8
BK
1706 case IPR_HOST_RCB_OVERLAY_ID_12:
1707 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1708 break;
1709 case IPR_HOST_RCB_OVERLAY_ID_13:
1710 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1711 break;
1712 case IPR_HOST_RCB_OVERLAY_ID_14:
1713 case IPR_HOST_RCB_OVERLAY_ID_16:
1714 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1715 break;
1716 case IPR_HOST_RCB_OVERLAY_ID_17:
1717 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1718 break;
49dc6a18
BK
1719 case IPR_HOST_RCB_OVERLAY_ID_20:
1720 ipr_log_fabric_error(ioa_cfg, hostrcb);
1721 break;
cf852037 1722 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 1723 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 1724 default:
a9cfca96 1725 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
1726 break;
1727 }
1728}
1729
1730/**
1731 * ipr_process_error - Op done function for an adapter error log.
1732 * @ipr_cmd: ipr command struct
1733 *
1734 * This function is the op done function for an error log host
1735 * controlled async from the adapter. It will log the error and
1736 * send the HCAM back to the adapter.
1737 *
1738 * Return value:
1739 * none
1740 **/
1741static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1742{
1743 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1744 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1745 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
65f56475 1746 u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1da177e4
LT
1747
1748 list_del(&hostrcb->queue);
1749 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1750
1751 if (!ioasc) {
1752 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
1753 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1754 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1da177e4
LT
1755 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1756 dev_err(&ioa_cfg->pdev->dev,
1757 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1758 }
1759
1760 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1761}
1762
1763/**
1764 * ipr_timeout - An internally generated op has timed out.
1765 * @ipr_cmd: ipr command struct
1766 *
1767 * This function blocks host requests and initiates an
1768 * adapter reset.
1769 *
1770 * Return value:
1771 * none
1772 **/
1773static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1774{
1775 unsigned long lock_flags = 0;
1776 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1777
1778 ENTER;
1779 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1780
1781 ioa_cfg->errors_logged++;
1782 dev_err(&ioa_cfg->pdev->dev,
1783 "Adapter being reset due to command timeout.\n");
1784
1785 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1786 ioa_cfg->sdt_state = GET_DUMP;
1787
1788 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1789 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1790
1791 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1792 LEAVE;
1793}
1794
1795/**
1796 * ipr_oper_timeout - Adapter timed out transitioning to operational
1797 * @ipr_cmd: ipr command struct
1798 *
1799 * This function blocks host requests and initiates an
1800 * adapter reset.
1801 *
1802 * Return value:
1803 * none
1804 **/
1805static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1806{
1807 unsigned long lock_flags = 0;
1808 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1809
1810 ENTER;
1811 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1812
1813 ioa_cfg->errors_logged++;
1814 dev_err(&ioa_cfg->pdev->dev,
1815 "Adapter timed out transitioning to operational.\n");
1816
1817 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1818 ioa_cfg->sdt_state = GET_DUMP;
1819
1820 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1821 if (ipr_fastfail)
1822 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1823 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1824 }
1825
1826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1827 LEAVE;
1828}
1829
1830/**
1831 * ipr_reset_reload - Reset/Reload the IOA
1832 * @ioa_cfg: ioa config struct
1833 * @shutdown_type: shutdown type
1834 *
1835 * This function resets the adapter and re-initializes it.
1836 * This function assumes that all new host commands have been stopped.
 *
1837 * Return value:
1838 * SUCCESS / FAILED
1839 **/
1840static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1841 enum ipr_shutdown_type shutdown_type)
1842{
1843 if (!ioa_cfg->in_reset_reload)
1844 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1845
1846 spin_unlock_irq(ioa_cfg->host->host_lock);
1847 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1848 spin_lock_irq(ioa_cfg->host->host_lock);
1849
1850 /* If we got hit with a host reset while we were already resetting
1851 the adapter for some reason and that reset failed, report FAILED. */
1852 if (ioa_cfg->ioa_is_dead) {
1853 ipr_trace;
1854 return FAILED;
1855 }
1856
1857 return SUCCESS;
1858}
1859
1860/**
1861 * ipr_find_ses_entry - Find matching SES in SES table
1862 * @res: resource entry struct of SES
1863 *
1864 * Return value:
1865 * pointer to SES table entry / NULL on failure
1866 **/
1867static const struct ipr_ses_table_entry *
1868ipr_find_ses_entry(struct ipr_resource_entry *res)
1869{
1870 int i, j, matches;
1871 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1872
1873 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1874 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1875 if (ste->compare_product_id_byte[j] == 'X') {
1876 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1877 matches++;
1878 else
1879 break;
1880 } else
1881 matches++;
1882 }
1883
1884 if (matches == IPR_PROD_ID_LEN)
1885 return ste;
1886 }
1887
1888 return NULL;
1889}
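/*
 * Descriptive note on the match loop above: compare_product_id_byte acts
 * as a per-byte mask. A position marked 'X' must match the table entry's
 * product_id byte exactly; any other position is a wildcard and counts
 * as a match unconditionally. An entry is returned only when all
 * IPR_PROD_ID_LEN positions match.
 */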
1890
1891/**
1892 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1893 * @ioa_cfg: ioa config struct
1894 * @bus: SCSI bus
1895 * @bus_width: bus width
1896 *
1897 * Return value:
1898 * SCSI bus speed in units of 100 kHz (e.g. 1600 means 160 MHz).
1899 * On a 2-byte wide SCSI bus each transfer moves two bytes, so the
1900 * maximum transfer rate is twice the clock rate (e.g. for a wide
1901 * enabled bus, 160 MHz gives a maximum of 320 MB/sec).
1902 **/
1903static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1904{
1905 struct ipr_resource_entry *res;
1906 const struct ipr_ses_table_entry *ste;
1907 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1908
1909 /* Loop through each config table entry in the config table buffer */
1910 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1911 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1912 continue;
1913
1914 if (bus != res->cfgte.res_addr.bus)
1915 continue;
1916
1917 if (!(ste = ipr_find_ses_entry(res)))
1918 continue;
1919
1920 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1921 }
1922
1923 return max_xfer_rate;
1924}
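/*
 * Worked example (illustrative values): if a matching SES table entry
 * has max_bus_speed_limit = 320 and the bus is 16 bits wide, the result
 * is (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz in the 100 kHz units
 * described above.
 */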
1925
1926/**
1927 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1928 * @ioa_cfg: ioa config struct
1929 * @max_delay: max delay in micro-seconds to wait
1930 *
1931 * Waits for an IODEBUG ACK from the IOA, busy-waiting between reads.
1932 *
1933 * Return value:
1934 * 0 on success / other on failure
1935 **/
1936static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1937{
1938 volatile u32 pcii_reg;
1939 int delay = 1;
1940
1941 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1942 while (delay < max_delay) {
1943 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1944
1945 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1946 return 0;
1947
1948 /* udelay cannot be used if delay is more than a few milliseconds */
1949 if ((delay / 1000) > MAX_UDELAY_MS)
1950 mdelay(delay / 1000);
1951 else
1952 udelay(delay);
1953
1954 delay += delay;
1955 }
1956 return -EIO;
1957}
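/*
 * Note: delay doubles on every pass (1, 2, 4, ... microseconds), so the
 * polling backs off exponentially and the total time spent waiting is
 * bounded by roughly twice max_delay.
 */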
1958
1959/**
1960 * ipr_get_ldump_data_section - Dump IOA memory
1961 * @ioa_cfg: ioa config struct
1962 * @start_addr: adapter address to dump
1963 * @dest: destination kernel buffer
1964 * @length_in_words: length to dump in 4 byte words
1965 *
1966 * Return value:
1967 * 0 on success / -EIO on failure
1968 **/
1969static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1970 u32 start_addr,
1971 __be32 *dest, u32 length_in_words)
1972{
1973 volatile u32 temp_pcii_reg;
1974 int i, delay = 0;
1975
1976 /* Write IOA interrupt reg starting LDUMP state */
1977 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1978 ioa_cfg->regs.set_uproc_interrupt_reg);
1979
1980 /* Wait for IO debug acknowledge */
1981 if (ipr_wait_iodbg_ack(ioa_cfg,
1982 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1983 dev_err(&ioa_cfg->pdev->dev,
1984 "IOA dump long data transfer timeout\n");
1985 return -EIO;
1986 }
1987
1988 /* Signal LDUMP interlocked - clear IO debug ack */
1989 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1990 ioa_cfg->regs.clr_interrupt_reg);
1991
1992 /* Write Mailbox with starting address */
1993 writel(start_addr, ioa_cfg->ioa_mailbox);
1994
1995 /* Signal address valid - clear IOA Reset alert */
1996 writel(IPR_UPROCI_RESET_ALERT,
1997 ioa_cfg->regs.clr_uproc_interrupt_reg);
1998
1999 for (i = 0; i < length_in_words; i++) {
2000 /* Wait for IO debug acknowledge */
2001 if (ipr_wait_iodbg_ack(ioa_cfg,
2002 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2003 dev_err(&ioa_cfg->pdev->dev,
2004 "IOA dump short data transfer timeout\n");
2005 return -EIO;
2006 }
2007
2008 /* Read data from mailbox and increment destination pointer */
2009 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2010 dest++;
2011
2012 /* For all but the last word of data, signal data received */
2013 if (i < (length_in_words - 1)) {
2014 /* Signal dump data received - Clear IO debug Ack */
2015 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2016 ioa_cfg->regs.clr_interrupt_reg);
2017 }
2018 }
2019
2020 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2021 writel(IPR_UPROCI_RESET_ALERT,
2022 ioa_cfg->regs.set_uproc_interrupt_reg);
2023
2024 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2025 ioa_cfg->regs.clr_uproc_interrupt_reg);
2026
2027 /* Signal dump data received - Clear IO debug Ack */
2028 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2029 ioa_cfg->regs.clr_interrupt_reg);
2030
2031 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2032 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2033 temp_pcii_reg =
2034 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2035
2036 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2037 return 0;
2038
2039 udelay(10);
2040 delay += 10;
2041 }
2042
2043 return 0;
2044}
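/*
 * Summary of the handshake implemented above:
 *
 *   1. Raise RESET_ALERT and IO_DEBUG_ALERT to enter the LDUMP state.
 *   2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it to interlock.
 *   3. Write the start address to the mailbox and clear RESET_ALERT
 *      to signal that the address is valid.
 *   4. For each word: wait for the ack, read the mailbox, and clear
 *      the ack (for all but the final word).
 *   5. Raise RESET_ALERT, clear IO_DEBUG_ALERT and the final ack, then
 *      poll until the IOA drops RESET_ALERT to confirm LDUMP exit.
 */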
2045
2046#ifdef CONFIG_SCSI_IPR_DUMP
2047/**
2048 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2049 * @ioa_cfg: ioa config struct
2050 * @pci_address: adapter address
2051 * @length: length of data to copy
2052 *
2053 * Copy data from PCI adapter to kernel buffer.
2054 * Note: length MUST be a 4 byte multiple
2055 * Return value:
2056 * 0 on success / other on failure
2057 **/
2058static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2059 unsigned long pci_address, u32 length)
2060{
2061 int bytes_copied = 0;
2062 int cur_len, rc, rem_len, rem_page_len;
2063 __be32 *page;
2064 unsigned long lock_flags = 0;
2065 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2066
2067 while (bytes_copied < length &&
2068 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2069 if (ioa_dump->page_offset >= PAGE_SIZE ||
2070 ioa_dump->page_offset == 0) {
2071 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2072
2073 if (!page) {
2074 ipr_trace;
2075 return bytes_copied;
2076 }
2077
2078 ioa_dump->page_offset = 0;
2079 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2080 ioa_dump->next_page_index++;
2081 } else
2082 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2083
2084 rem_len = length - bytes_copied;
2085 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2086 cur_len = min(rem_len, rem_page_len);
2087
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2090 rc = -EIO;
2091 } else {
2092 rc = ipr_get_ldump_data_section(ioa_cfg,
2093 pci_address + bytes_copied,
2094 &page[ioa_dump->page_offset / 4],
2095 (cur_len / sizeof(u32)));
2096 }
2097 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2098
2099 if (!rc) {
2100 ioa_dump->page_offset += cur_len;
2101 bytes_copied += cur_len;
2102 } else {
2103 ipr_trace;
2104 break;
2105 }
2106 schedule();
2107 }
2108
2109 return bytes_copied;
2110}
2111
2112/**
2113 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2114 * @hdr: dump entry header struct
2115 *
2116 * Return value:
2117 * nothing
2118 **/
2119static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2120{
2121 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2122 hdr->num_elems = 1;
2123 hdr->offset = sizeof(*hdr);
2124 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2125}
2126
2127/**
2128 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2129 * @ioa_cfg: ioa config struct
2130 * @driver_dump: driver dump struct
2131 *
2132 * Return value:
2133 * nothing
2134 **/
2135static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2136 struct ipr_driver_dump *driver_dump)
2137{
2138 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2139
2140 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2141 driver_dump->ioa_type_entry.hdr.len =
2142 sizeof(struct ipr_dump_ioa_type_entry) -
2143 sizeof(struct ipr_dump_entry_header);
2144 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2145 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2146 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2147 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2148 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2149 ucode_vpd->minor_release[1];
2150 driver_dump->hdr.num_entries++;
2151}
2152
2153/**
2154 * ipr_dump_version_data - Fill in the driver version in the dump.
2155 * @ioa_cfg: ioa config struct
2156 * @driver_dump: driver dump struct
2157 *
2158 * Return value:
2159 * nothing
2160 **/
2161static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2162 struct ipr_driver_dump *driver_dump)
2163{
2164 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2165 driver_dump->version_entry.hdr.len =
2166 sizeof(struct ipr_dump_version_entry) -
2167 sizeof(struct ipr_dump_entry_header);
2168 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2169 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2170 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2171 driver_dump->hdr.num_entries++;
2172}
2173
2174/**
2175 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2176 * @ioa_cfg: ioa config struct
2177 * @driver_dump: driver dump struct
2178 *
2179 * Return value:
2180 * nothing
2181 **/
2182static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2183 struct ipr_driver_dump *driver_dump)
2184{
2185 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2186 driver_dump->trace_entry.hdr.len =
2187 sizeof(struct ipr_dump_trace_entry) -
2188 sizeof(struct ipr_dump_entry_header);
2189 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2190 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2191 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2192 driver_dump->hdr.num_entries++;
2193}
2194
2195/**
2196 * ipr_dump_location_data - Fill in the IOA location in the dump.
2197 * @ioa_cfg: ioa config struct
2198 * @driver_dump: driver dump struct
2199 *
2200 * Return value:
2201 * nothing
2202 **/
2203static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2204 struct ipr_driver_dump *driver_dump)
2205{
2206 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2207 driver_dump->location_entry.hdr.len =
2208 sizeof(struct ipr_dump_location_entry) -
2209 sizeof(struct ipr_dump_entry_header);
2210 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2211 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2212 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2213 driver_dump->hdr.num_entries++;
2214}
2215
2216/**
2217 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2218 * @ioa_cfg: ioa config struct
2219 * @dump: dump struct
2220 *
2221 * Return value:
2222 * nothing
2223 **/
2224static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2225{
2226 unsigned long start_addr, sdt_word;
2227 unsigned long lock_flags = 0;
2228 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2229 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2230 u32 num_entries, start_off, end_off;
2231 u32 bytes_to_copy, bytes_copied, rc;
2232 struct ipr_sdt *sdt;
2233 int i;
2234
2235 ENTER;
2236
2237 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2238
2239 if (ioa_cfg->sdt_state != GET_DUMP) {
2240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2241 return;
2242 }
2243
2244 start_addr = readl(ioa_cfg->ioa_mailbox);
2245
2246 if (!ipr_sdt_is_fmt2(start_addr)) {
2247 dev_err(&ioa_cfg->pdev->dev,
2248 "Invalid dump table format: %lx\n", start_addr);
2249 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2250 return;
2251 }
2252
2253 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2254
2255 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2256
2257 /* Initialize the overall dump header */
2258 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2259 driver_dump->hdr.num_entries = 1;
2260 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2261 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2262 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2263 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2264
2265 ipr_dump_version_data(ioa_cfg, driver_dump);
2266 ipr_dump_location_data(ioa_cfg, driver_dump);
2267 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2268 ipr_dump_trace_data(ioa_cfg, driver_dump);
2269
2270 /* Update dump_header */
2271 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2272
2273 /* IOA Dump entry */
2274 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2275 ioa_dump->format = IPR_SDT_FMT2;
2276 ioa_dump->hdr.len = 0;
2277 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2278 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2279
2280 /* The first entries in the sdt are a list of dump addresses and
2281 lengths used to gather the real dump data. sdt points to the
2282 IOA-generated dump table; dump data will be extracted based on
2283 the entries in this table. */
2284 sdt = &ioa_dump->sdt;
2285
2286 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2287 sizeof(struct ipr_sdt) / sizeof(__be32));
2288
2289 /* Bail out unless the Smart Dump table was read and is ready to use */
2290 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2291 dev_err(&ioa_cfg->pdev->dev,
2292 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2293 rc, be32_to_cpu(sdt->hdr.state));
2294 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2295 ioa_cfg->sdt_state = DUMP_OBTAINED;
2296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2297 return;
2298 }
2299
2300 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2301
2302 if (num_entries > IPR_NUM_SDT_ENTRIES)
2303 num_entries = IPR_NUM_SDT_ENTRIES;
2304
2305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2306
2307 for (i = 0; i < num_entries; i++) {
2308 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2309 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2310 break;
2311 }
2312
2313 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2314 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2315 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2316 end_off = be32_to_cpu(sdt->entry[i].end_offset);
2317
2318 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2319 bytes_to_copy = end_off - start_off;
2320 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2321 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2322 continue;
2323 }
2324
2325 /* Copy data from adapter to driver buffers */
2326 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2327 bytes_to_copy);
2328
2329 ioa_dump->hdr.len += bytes_copied;
2330
2331 if (bytes_copied != bytes_to_copy) {
2332 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2333 break;
2334 }
2335 }
2336 }
2337 }
2338
2339 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2340
2341 /* Update dump_header */
2342 driver_dump->hdr.len += ioa_dump->hdr.len;
2343 wmb();
2344 ioa_cfg->sdt_state = DUMP_OBTAINED;
2345 LEAVE;
2346}
2347
2348#else
2349#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2350#endif
2351
2352/**
2353 * ipr_release_dump - Free adapter dump memory
2354 * @kref: kref struct
2355 *
2356 * Return value:
2357 * nothing
2358 **/
2359static void ipr_release_dump(struct kref *kref)
2360{
2361 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2362 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2363 unsigned long lock_flags = 0;
2364 int i;
2365
2366 ENTER;
2367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2368 ioa_cfg->dump = NULL;
2369 ioa_cfg->sdt_state = INACTIVE;
2370 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2371
2372 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2373 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2374
2375 kfree(dump);
2376 LEAVE;
2377}
2378
2379/**
2380 * ipr_worker_thread - Worker thread
c4028958 2381 * @work: work struct (embedded in the ioa config struct)
1da177e4
LT
2382 *
2383 * Called at task level from a work thread. This function takes care
2384 * of adding and removing devices from the mid-layer as configuration
2385 * changes are detected by the adapter.
2386 *
2387 * Return value:
2388 * nothing
2389 **/
c4028958 2390static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
2391{
2392 unsigned long lock_flags;
2393 struct ipr_resource_entry *res;
2394 struct scsi_device *sdev;
2395 struct ipr_dump *dump;
c4028958
DH
2396 struct ipr_ioa_cfg *ioa_cfg =
2397 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
2398 u8 bus, target, lun;
2399 int did_work;
2400
2401 ENTER;
2402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2403
2404 if (ioa_cfg->sdt_state == GET_DUMP) {
2405 dump = ioa_cfg->dump;
2406 if (!dump) {
2407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2408 return;
2409 }
2410 kref_get(&dump->kref);
2411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2412 ipr_get_ioa_dump(ioa_cfg, dump);
2413 kref_put(&dump->kref, ipr_release_dump);
2414
2415 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2416 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2417 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2419 return;
2420 }
2421
2422restart:
2423 do {
2424 did_work = 0;
2425 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2426 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2427 return;
2428 }
2429
2430 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2431 if (res->del_from_ml && res->sdev) {
2432 did_work = 1;
2433 sdev = res->sdev;
2434 if (!scsi_device_get(sdev)) {
1da177e4
LT
2435 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2437 scsi_remove_device(sdev);
2438 scsi_device_put(sdev);
2439 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2440 }
2441 break;
2442 }
2443 }
2444 } while (did_work);
2445
2446 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2447 if (res->add_to_ml) {
2448 bus = res->cfgte.res_addr.bus;
2449 target = res->cfgte.res_addr.target;
2450 lun = res->cfgte.res_addr.lun;
1121b794 2451 res->add_to_ml = 0;
1da177e4
LT
2452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2453 scsi_add_device(ioa_cfg->host, bus, target, lun);
2454 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2455 goto restart;
2456 }
2457 }
2458
2459 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
312c004d 2460 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
1da177e4
LT
2461 LEAVE;
2462}
2463
2464#ifdef CONFIG_SCSI_IPR_TRACE
2465/**
2466 * ipr_read_trace - Dump the adapter trace
2467 * @kobj: kobject struct
2468 * @buf: buffer
2469 * @off: offset
2470 * @count: buffer size
2471 *
2472 * Return value:
2473 * number of bytes printed to buffer
2474 **/
2475static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2476 loff_t off, size_t count)
2477{
2478 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2479 struct Scsi_Host *shost = class_to_shost(cdev);
2480 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2481 unsigned long lock_flags = 0;
2482 int size = IPR_TRACE_SIZE;
2483 char *src = (char *)ioa_cfg->trace;
2484
2485 if (off > size)
2486 return 0;
2487 if (off + count > size) {
2488 size -= off;
2489 count = size;
2490 }
2491
2492 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2493 memcpy(buf, &src[off], count);
2494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2495 return count;
2496}
2497
2498static struct bin_attribute ipr_trace_attr = {
2499 .attr = {
2500 .name = "trace",
2501 .mode = S_IRUGO,
2502 },
2503 .size = 0,
2504 .read = ipr_read_trace,
2505};
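/*
 * Usage sketch (sysfs path illustrative for this era of the driver):
 *
 *   cat /sys/class/scsi_host/host0/trace > ipr_trace.bin
 */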
2506#endif
2507
62275040
BK
2508static const struct {
2509 enum ipr_cache_state state;
2510 char *name;
2511} cache_state [] = {
2512 { CACHE_NONE, "none" },
2513 { CACHE_DISABLED, "disabled" },
2514 { CACHE_ENABLED, "enabled" }
2515};
2516
2517/**
2518 * ipr_show_write_caching - Show the write caching attribute
2519 * @class_dev: class device struct
2520 * @buf: buffer
2521 *
2522 * Return value:
2523 * number of bytes printed to buffer
2524 **/
2525static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2526{
2527 struct Scsi_Host *shost = class_to_shost(class_dev);
2528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2529 unsigned long lock_flags = 0;
2530 int i, len = 0;
2531
2532 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2533 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2534 if (cache_state[i].state == ioa_cfg->cache_state) {
2535 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2536 break;
2537 }
2538 }
2539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2540 return len;
2541}
2542
2543
2544/**
2545 * ipr_store_write_caching - Enable/disable adapter write cache
2546 * @class_dev: class_device struct
2547 * @buf: buffer
2548 * @count: buffer size
2549 *
2550 * This function will enable/disable adapter write cache.
2551 *
2552 * Return value:
2553 * count on success / other on failure
2554 **/
2555static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2556 const char *buf, size_t count)
2557{
2558 struct Scsi_Host *shost = class_to_shost(class_dev);
2559 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2560 unsigned long lock_flags = 0;
2561 enum ipr_cache_state new_state = CACHE_INVALID;
2562 int i;
2563
2564 if (!capable(CAP_SYS_ADMIN))
2565 return -EACCES;
2566 if (ioa_cfg->cache_state == CACHE_NONE)
2567 return -EINVAL;
2568
2569 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2570 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2571 new_state = cache_state[i].state;
2572 break;
2573 }
2574 }
2575
2576 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2577 return -EINVAL;
2578
2579 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2580 if (ioa_cfg->cache_state == new_state) {
2581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2582 return count;
2583 }
2584
2585 ioa_cfg->cache_state = new_state;
2586 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2587 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2588 if (!ioa_cfg->in_reset_reload)
2589 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2590 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2591 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2592
2593 return count;
2594}
2595
2596static struct class_device_attribute ipr_ioa_cache_attr = {
2597 .attr = {
2598 .name = "write_cache",
2599 .mode = S_IRUGO | S_IWUSR,
2600 },
2601 .show = ipr_show_write_caching,
2602 .store = ipr_store_write_caching
2603};
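/*
 * Usage sketch (sysfs path illustrative): the attribute accepts the
 * names from cache_state[] above, and a successful write triggers a
 * normal shutdown/reset so the new setting takes effect:
 *
 *   echo disabled > /sys/class/scsi_host/host0/write_cache
 *   cat /sys/class/scsi_host/host0/write_cache
 */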
2604
1da177e4
LT
2605/**
2606 * ipr_show_fw_version - Show the firmware version
2607 * @class_dev: class device struct
2608 * @buf: buffer
2609 *
2610 * Return value:
2611 * number of bytes printed to buffer
2612 **/
2613static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2614{
2615 struct Scsi_Host *shost = class_to_shost(class_dev);
2616 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2617 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2618 unsigned long lock_flags = 0;
2619 int len;
2620
2621 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2622 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2623 ucode_vpd->major_release, ucode_vpd->card_type,
2624 ucode_vpd->minor_release[0],
2625 ucode_vpd->minor_release[1]);
2626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2627 return len;
2628}
2629
2630static struct class_device_attribute ipr_fw_version_attr = {
2631 .attr = {
2632 .name = "fw_version",
2633 .mode = S_IRUGO,
2634 },
2635 .show = ipr_show_fw_version,
2636};
2637
2638/**
2639 * ipr_show_log_level - Show the adapter's error logging level
2640 * @class_dev: class device struct
2641 * @buf: buffer
2642 *
2643 * Return value:
2644 * number of bytes printed to buffer
2645 **/
2646static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2647{
2648 struct Scsi_Host *shost = class_to_shost(class_dev);
2649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2650 unsigned long lock_flags = 0;
2651 int len;
2652
2653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2654 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2656 return len;
2657}
2658
2659/**
2660 * ipr_store_log_level - Change the adapter's error logging level
2661 * @class_dev: class device struct
2662 * @buf: buffer
2663 *
2664 * Return value:
2665 * number of bytes printed to buffer
2666 **/
2667static ssize_t ipr_store_log_level(struct class_device *class_dev,
2668 const char *buf, size_t count)
2669{
2670 struct Scsi_Host *shost = class_to_shost(class_dev);
2671 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2672 unsigned long lock_flags = 0;
2673
2674 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2675 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2676 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2677 return strlen(buf);
2678}
2679
2680static struct class_device_attribute ipr_log_level_attr = {
2681 .attr = {
2682 .name = "log_level",
2683 .mode = S_IRUGO | S_IWUSR,
2684 },
2685 .show = ipr_show_log_level,
2686 .store = ipr_store_log_level
2687};
2688
2689/**
2690 * ipr_store_diagnostics - IOA Diagnostics interface
2691 * @class_dev: class_device struct
2692 * @buf: buffer
2693 * @count: buffer size
2694 *
2695 * This function will reset the adapter and wait a reasonable
2696 * amount of time for any errors that the adapter might log.
2697 *
2698 * Return value:
2699 * count on success / other on failure
2700 **/
2701static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2702 const char *buf, size_t count)
2703{
2704 struct Scsi_Host *shost = class_to_shost(class_dev);
2705 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2706 unsigned long lock_flags = 0;
2707 int rc = count;
2708
2709 if (!capable(CAP_SYS_ADMIN))
2710 return -EACCES;
2711
1da177e4 2712 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
2713 while (ioa_cfg->in_reset_reload) {
2714 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2715 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2716 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2717 }
2718
1da177e4
LT
2719 ioa_cfg->errors_logged = 0;
2720 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2721
2722 if (ioa_cfg->in_reset_reload) {
2723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2724 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2725
2726 /* Wait for a second for any errors to be logged */
2727 msleep(1000);
2728 } else {
2729 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2730 return -EIO;
2731 }
2732
2733 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2734 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2735 rc = -EIO;
2736 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2737
2738 return rc;
2739}
2740
2741static struct class_device_attribute ipr_diagnostics_attr = {
2742 .attr = {
2743 .name = "run_diagnostics",
2744 .mode = S_IWUSR,
2745 },
2746 .store = ipr_store_diagnostics
2747};
2748
f37eb54b
BK
2749/**
2750 * ipr_show_adapter_state - Show the adapter's state
2751 * @class_dev: class device struct
2752 * @buf: buffer
2753 *
2754 * Return value:
2755 * number of bytes printed to buffer
2756 **/
2757static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2758{
2759 struct Scsi_Host *shost = class_to_shost(class_dev);
2760 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2761 unsigned long lock_flags = 0;
2762 int len;
2763
2764 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2765 if (ioa_cfg->ioa_is_dead)
2766 len = snprintf(buf, PAGE_SIZE, "offline\n");
2767 else
2768 len = snprintf(buf, PAGE_SIZE, "online\n");
2769 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2770 return len;
2771}
2772
2773/**
2774 * ipr_store_adapter_state - Change adapter state
2775 * @class_dev: class_device struct
2776 * @buf: buffer
2777 * @count: buffer size
2778 *
2779 * This function will change the adapter's state.
2780 *
2781 * Return value:
2782 * count on success / other on failure
2783 **/
2784static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2785 const char *buf, size_t count)
2786{
2787 struct Scsi_Host *shost = class_to_shost(class_dev);
2788 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2789 unsigned long lock_flags;
2790 int result = count;
2791
2792 if (!capable(CAP_SYS_ADMIN))
2793 return -EACCES;
2794
2795 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2796 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2797 ioa_cfg->ioa_is_dead = 0;
2798 ioa_cfg->reset_retries = 0;
2799 ioa_cfg->in_ioa_bringdown = 0;
2800 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2801 }
2802 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2803 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2804
2805 return result;
2806}
2807
2808static struct class_device_attribute ipr_ioa_state_attr = {
2809 .attr = {
2810 .name = "state",
2811 .mode = S_IRUGO | S_IWUSR,
2812 },
2813 .show = ipr_show_adapter_state,
2814 .store = ipr_store_adapter_state
2815};
2816
1da177e4
LT
2817/**
2818 * ipr_store_reset_adapter - Reset the adapter
2819 * @class_dev: class_device struct
2820 * @buf: buffer
2821 * @count: buffer size
2822 *
2823 * This function will reset the adapter.
2824 *
2825 * Return value:
2826 * count on success / other on failure
2827 **/
2828static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2829 const char *buf, size_t count)
2830{
2831 struct Scsi_Host *shost = class_to_shost(class_dev);
2832 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2833 unsigned long lock_flags;
2834 int result = count;
2835
2836 if (!capable(CAP_SYS_ADMIN))
2837 return -EACCES;
2838
2839 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2840 if (!ioa_cfg->in_reset_reload)
2841 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2842 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2843 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2844
2845 return result;
2846}
2847
2848static struct class_device_attribute ipr_ioa_reset_attr = {
2849 .attr = {
2850 .name = "reset_host",
2851 .mode = S_IWUSR,
2852 },
2853 .store = ipr_store_reset_adapter
2854};
2855
2856/**
2857 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2858 * @buf_len: buffer length
2859 *
2860 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2861 * list to use for microcode download
2862 *
2863 * Return value:
2864 * pointer to sglist / NULL on failure
2865 **/
2866static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2867{
2868 int sg_size, order, bsize_elem, num_elem, i, j;
2869 struct ipr_sglist *sglist;
2870 struct scatterlist *scatterlist;
2871 struct page *page;
2872
2873 /* Get the minimum size per scatter/gather element */
2874 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2875
2876 /* Get the actual size per element */
2877 order = get_order(sg_size);
2878
2879 /* Determine the actual number of bytes per element */
2880 bsize_elem = PAGE_SIZE * (1 << order);
2881
2882 /* Determine the actual number of sg entries needed */
2883 if (buf_len % bsize_elem)
2884 num_elem = (buf_len / bsize_elem) + 1;
2885 else
2886 num_elem = buf_len / bsize_elem;
2887
2888 /* Allocate a scatter/gather list for the DMA */
0bc42e35 2889 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
2890 (sizeof(struct scatterlist) * (num_elem - 1)),
2891 GFP_KERNEL);
2892
2893 if (sglist == NULL) {
2894 ipr_trace;
2895 return NULL;
2896 }
2897
1da177e4
LT
2898 scatterlist = sglist->scatterlist;
2899
2900 sglist->order = order;
2901 sglist->num_sg = num_elem;
2902
2903 /* Allocate a bunch of sg elements */
2904 for (i = 0; i < num_elem; i++) {
2905 page = alloc_pages(GFP_KERNEL, order);
2906 if (!page) {
2907 ipr_trace;
2908
2909 /* Free up what we already allocated */
2910 for (j = i - 1; j >= 0; j--)
2911 __free_pages(scatterlist[j].page, order);
2912 kfree(sglist);
2913 return NULL;
2914 }
2915
2916 scatterlist[i].page = page;
2917 }
2918
2919 return sglist;
2920}
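/*
 * Worked sizing example (illustrative; assumes IPR_MAX_SGLIST is 64 and
 * 4 KiB pages): for a 1 MiB microcode image,
 *
 *   sg_size    = 1048576 / 63     = 16644 bytes minimum per element
 *   order      = get_order(16644) = 3 (eight pages per element)
 *   bsize_elem = 4096 * (1 << 3)  = 32768 bytes per element
 *   num_elem   = 1048576 / 32768  = 32 scatter/gather elements
 */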
2921
2922/**
2923 * ipr_free_ucode_buffer - Frees a microcode download buffer
2924 * @sglist: scatter/gather list pointer
2925 *
2926 * Free a DMA'able ucode download buffer previously allocated with
2927 * ipr_alloc_ucode_buffer
2928 *
2929 * Return value:
2930 * nothing
2931 **/
2932static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2933{
2934 int i;
2935
2936 for (i = 0; i < sglist->num_sg; i++)
2937 __free_pages(sglist->scatterlist[i].page, sglist->order);
2938
2939 kfree(sglist);
2940}
2941
2942/**
2943 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2944 * @sglist: scatter/gather list pointer
2945 * @buffer: buffer pointer
2946 * @len: buffer length
2947 *
2948 * Copy a microcode image from a user buffer into a buffer allocated by
2949 * ipr_alloc_ucode_buffer
2950 *
2951 * Return value:
2952 * 0 on success / other on failure
2953 **/
2954static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2955 u8 *buffer, u32 len)
2956{
2957 int bsize_elem, i, result = 0;
2958 struct scatterlist *scatterlist;
2959 void *kaddr;
2960
2961 /* Determine the actual number of bytes per element */
2962 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2963
2964 scatterlist = sglist->scatterlist;
2965
2966 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2967 kaddr = kmap(scatterlist[i].page);
2968 memcpy(kaddr, buffer, bsize_elem);
2969 kunmap(scatterlist[i].page);
2970
2971 scatterlist[i].length = bsize_elem;
2972
2973 if (result != 0) {
2974 ipr_trace;
2975 return result;
2976 }
2977 }
2978
2979 if (len % bsize_elem) {
2980 kaddr = kmap(scatterlist[i].page);
2981 memcpy(kaddr, buffer, len % bsize_elem);
2982 kunmap(scatterlist[i].page);
2983
2984 scatterlist[i].length = len % bsize_elem;
2985 }
2986
2987 sglist->buffer_len = len;
2988 return result;
2989}
2990
2991/**
12baa420 2992 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
2993 * @ipr_cmd: ipr command struct
2994 * @sglist: scatter/gather list
1da177e4 2995 *
12baa420 2996 * Builds a microcode download IOA data list (IOADL).
1da177e4 2997 *
1da177e4 2998 **/
12baa420
BK
2999static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3000 struct ipr_sglist *sglist)
1da177e4 3001{
1da177e4
LT
3002 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3003 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3004 struct scatterlist *scatterlist = sglist->scatterlist;
3005 int i;
3006
12baa420 3007 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3008 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
12baa420 3009 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
1da177e4
LT
3010 ioarcb->write_ioadl_len =
3011 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3012
3013 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3014 ioadl[i].flags_and_data_len =
3015 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3016 ioadl[i].address =
3017 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3018 }
3019
12baa420
BK
3020 ioadl[i-1].flags_and_data_len |=
3021 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3022}
3023
3024/**
3025 * ipr_update_ioa_ucode - Update IOA's microcode
3026 * @ioa_cfg: ioa config struct
3027 * @sglist: scatter/gather list
3028 *
3029 * Initiate an adapter reset to update the IOA's microcode
3030 *
3031 * Return value:
3032 * 0 on success / -EIO on failure
3033 **/
3034static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3035 struct ipr_sglist *sglist)
3036{
3037 unsigned long lock_flags;
3038
3039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3040 while (ioa_cfg->in_reset_reload) {
3041 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3042 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3043 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3044 }
12baa420
BK
3045
3046 if (ioa_cfg->ucode_sglist) {
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3048 dev_err(&ioa_cfg->pdev->dev,
3049 "Microcode download already in progress\n");
3050 return -EIO;
1da177e4 3051 }
12baa420
BK
3052
3053 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3054 sglist->num_sg, DMA_TO_DEVICE);
3055
3056 if (!sglist->num_dma_sg) {
3057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3058 dev_err(&ioa_cfg->pdev->dev,
3059 "Failed to map microcode download buffer!\n");
1da177e4
LT
3060 return -EIO;
3061 }
3062
12baa420
BK
3063 ioa_cfg->ucode_sglist = sglist;
3064 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3067
3068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3069 ioa_cfg->ucode_sglist = NULL;
3070 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3071 return 0;
3072}
3073
3074/**
3075 * ipr_store_update_fw - Update the firmware on the adapter
3076 * @class_dev: class_device struct
3077 * @buf: buffer
3078 * @count: buffer size
3079 *
3080 * This function will update the firmware on the adapter.
3081 *
3082 * Return value:
3083 * count on success / other on failure
3084 **/
3085static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3086 const char *buf, size_t count)
3087{
3088 struct Scsi_Host *shost = class_to_shost(class_dev);
3089 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3090 struct ipr_ucode_image_header *image_hdr;
3091 const struct firmware *fw_entry;
3092 struct ipr_sglist *sglist;
1da177e4
LT
3093 char fname[100];
3094 char *src;
3095 int len, result, dnld_size;
3096
3097 if (!capable(CAP_SYS_ADMIN))
3098 return -EACCES;
3099
3100 len = snprintf(fname, 99, "%s", buf);
3101 fname[len-1] = '\0';
3102
3103 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3104 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3105 return -EIO;
3106 }
3107
3108 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3109
3110 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3111 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3112 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3113 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3114 release_firmware(fw_entry);
3115 return -EINVAL;
3116 }
3117
3118 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3119 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3120 sglist = ipr_alloc_ucode_buffer(dnld_size);
3121
3122 if (!sglist) {
3123 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3124 release_firmware(fw_entry);
3125 return -ENOMEM;
3126 }
3127
3128 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3129
3130 if (result) {
3131 dev_err(&ioa_cfg->pdev->dev,
3132 "Microcode buffer copy to DMA buffer failed\n");
12baa420 3133 goto out;
1da177e4
LT
3134 }
3135
12baa420 3136 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 3137
12baa420
BK
3138 if (!result)
3139 result = count;
3140out:
1da177e4
LT
3141 ipr_free_ucode_buffer(sglist);
3142 release_firmware(fw_entry);
12baa420 3143 return result;
1da177e4
LT
3144}
3145
3146static struct class_device_attribute ipr_update_fw_attr = {
3147 .attr = {
3148 .name = "update_fw",
3149 .mode = S_IWUSR,
3150 },
3151 .store = ipr_store_update_fw
3152};
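/*
 * Usage sketch (file name and sysfs path illustrative): writing the name
 * of an image available to request_firmware() starts a download reset:
 *
 *   echo my-ioa-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */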
3153
3154static struct class_device_attribute *ipr_ioa_attrs[] = {
3155 &ipr_fw_version_attr,
3156 &ipr_log_level_attr,
3157 &ipr_diagnostics_attr,
f37eb54b 3158 &ipr_ioa_state_attr,
1da177e4
LT
3159 &ipr_ioa_reset_attr,
3160 &ipr_update_fw_attr,
62275040 3161 &ipr_ioa_cache_attr,
1da177e4
LT
3162 NULL,
3163};
3164
3165#ifdef CONFIG_SCSI_IPR_DUMP
3166/**
3167 * ipr_read_dump - Dump the adapter
3168 * @kobj: kobject struct
3169 * @buf: buffer
3170 * @off: offset
3171 * @count: buffer size
3172 *
3173 * Return value:
3174 * number of bytes printed to buffer
3175 **/
3176static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3177 loff_t off, size_t count)
3178{
3179 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3180 struct Scsi_Host *shost = class_to_shost(cdev);
3181 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3182 struct ipr_dump *dump;
3183 unsigned long lock_flags = 0;
3184 char *src;
3185 int len;
3186 size_t rc = count;
3187
3188 if (!capable(CAP_SYS_ADMIN))
3189 return -EACCES;
3190
3191 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3192 dump = ioa_cfg->dump;
3193
3194 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3195 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3196 return 0;
3197 }
3198 kref_get(&dump->kref);
3199 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3200
3201 if (off > dump->driver_dump.hdr.len) {
3202 kref_put(&dump->kref, ipr_release_dump);
3203 return 0;
3204 }
3205
3206 if (off + count > dump->driver_dump.hdr.len) {
3207 count = dump->driver_dump.hdr.len - off;
3208 rc = count;
3209 }
3210
3211 if (count && off < sizeof(dump->driver_dump)) {
3212 if (off + count > sizeof(dump->driver_dump))
3213 len = sizeof(dump->driver_dump) - off;
3214 else
3215 len = count;
3216 src = (u8 *)&dump->driver_dump + off;
3217 memcpy(buf, src, len);
3218 buf += len;
3219 off += len;
3220 count -= len;
3221 }
3222
3223 off -= sizeof(dump->driver_dump);
3224
3225 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3226 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3227 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3228 else
3229 len = count;
3230 src = (u8 *)&dump->ioa_dump + off;
3231 memcpy(buf, src, len);
3232 buf += len;
3233 off += len;
3234 count -= len;
3235 }
3236
3237 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3238
3239 while (count) {
3240 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3241 len = PAGE_ALIGN(off) - off;
3242 else
3243 len = count;
3244 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3245 src += off & ~PAGE_MASK;
3246 memcpy(buf, src, len);
3247 buf += len;
3248 off += len;
3249 count -= len;
3250 }
3251
3252 kref_put(&dump->kref, ipr_release_dump);
3253 return rc;
3254}
3255
3256/**
3257 * ipr_alloc_dump - Prepare for adapter dump
3258 * @ioa_cfg: ioa config struct
3259 *
3260 * Return value:
3261 * 0 on success / other on failure
3262 **/
3263static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3264{
3265 struct ipr_dump *dump;
3266 unsigned long lock_flags = 0;
3267
0bc42e35 3268 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
3269
3270 if (!dump) {
3271 ipr_err("Dump memory allocation failed\n");
3272 return -ENOMEM;
3273 }
3274
1da177e4
LT
3275 kref_init(&dump->kref);
3276 dump->ioa_cfg = ioa_cfg;
3277
3278 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3279
3280 if (INACTIVE != ioa_cfg->sdt_state) {
3281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 kfree(dump);
3283 return 0;
3284 }
3285
3286 ioa_cfg->dump = dump;
3287 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3288 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3289 ioa_cfg->dump_taken = 1;
3290 schedule_work(&ioa_cfg->work_q);
3291 }
3292 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3293
1da177e4
LT
3294 return 0;
3295}
3296
3297/**
3298 * ipr_free_dump - Free adapter dump memory
3299 * @ioa_cfg: ioa config struct
3300 *
3301 * Return value:
3302 * 0 on success / other on failure
3303 **/
3304static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3305{
3306 struct ipr_dump *dump;
3307 unsigned long lock_flags = 0;
3308
3309 ENTER;
3310
3311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3312 dump = ioa_cfg->dump;
3313 if (!dump) {
3314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315 return 0;
3316 }
3317
3318 ioa_cfg->dump = NULL;
3319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3320
3321 kref_put(&dump->kref, ipr_release_dump);
3322
3323 LEAVE;
3324 return 0;
3325}
3326
3327/**
3328 * ipr_write_dump - Setup dump state of adapter
3329 * @kobj: kobject struct
3330 * @buf: buffer
3331 * @off: offset
3332 * @count: buffer size
3333 *
3334 * Return value:
3335 * number of bytes written on success / other on failure
3336 **/
3337static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3338 loff_t off, size_t count)
3339{
3340 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3341 struct Scsi_Host *shost = class_to_shost(cdev);
3342 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3343 int rc;
3344
3345 if (!capable(CAP_SYS_ADMIN))
3346 return -EACCES;
3347
3348 if (buf[0] == '1')
3349 rc = ipr_alloc_dump(ioa_cfg);
3350 else if (buf[0] == '0')
3351 rc = ipr_free_dump(ioa_cfg);
3352 else
3353 return -EINVAL;
3354
3355 if (rc)
3356 return rc;
3357 else
3358 return count;
3359}
3360
3361static struct bin_attribute ipr_dump_attr = {
3362 .attr = {
3363 .name = "dump",
3364 .mode = S_IRUSR | S_IWUSR,
3365 },
3366 .size = 0,
3367 .read = ipr_read_dump,
3368 .write = ipr_write_dump
3369};
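/*
 * Usage sketch (sysfs path illustrative):
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump    # prepare/capture a dump
 *   cat /sys/class/scsi_host/host0/dump > ioa.dump
 *   echo 0 > /sys/class/scsi_host/host0/dump    # free the dump memory
 */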
3370#else
3371static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3372#endif
3373
3374/**
3375 * ipr_change_queue_depth - Change the device's queue depth
3376 * @sdev: scsi device struct
3377 * @qdepth: depth to set
3378 *
3379 * Return value:
3380 * actual depth set
3381 **/
3382static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3383{
35a39691
BK
3384 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3385 struct ipr_resource_entry *res;
3386 unsigned long lock_flags = 0;
3387
3388 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3389 res = (struct ipr_resource_entry *)sdev->hostdata;
3390
3391 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3392 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3393 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3394
1da177e4
LT
3395 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3396 return sdev->queue_depth;
3397}
3398
3399/**
3400 * ipr_change_queue_type - Change the device's queue type
3401 * @sdev: scsi device struct
3402 * @tag_type: type of tags to use
3403 *
3404 * Return value:
3405 * actual queue type set
3406 **/
3407static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3408{
3409 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3410 struct ipr_resource_entry *res;
3411 unsigned long lock_flags = 0;
3412
3413 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3414 res = (struct ipr_resource_entry *)sdev->hostdata;
3415
3416 if (res) {
3417 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3418 /*
3419 * We don't bother quiescing the device here since the
3420 * adapter firmware does it for us.
3421 */
3422 scsi_set_tag_type(sdev, tag_type);
3423
3424 if (tag_type)
3425 scsi_activate_tcq(sdev, sdev->queue_depth);
3426 else
3427 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3428 } else
3429 tag_type = 0;
3430 } else
3431 tag_type = 0;
3432
3433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3434 return tag_type;
3435}
3436
3437/**
3438 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3439 * @dev: device struct
3440 * @buf: buffer
3441 *
3442 * Return value:
3443 * number of bytes printed to buffer
3444 **/
3445static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3446{
3447 struct scsi_device *sdev = to_scsi_device(dev);
3448 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3449 struct ipr_resource_entry *res;
3450 unsigned long lock_flags = 0;
3451 ssize_t len = -ENXIO;
3452
3453 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3454 res = (struct ipr_resource_entry *)sdev->hostdata;
3455 if (res)
3456 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3457 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3458 return len;
3459}
3460
3461static struct device_attribute ipr_adapter_handle_attr = {
3462 .attr = {
3463 .name = "adapter_handle",
3464 .mode = S_IRUSR,
3465 },
3466 .show = ipr_show_adapter_handle
3467};
3468
3469static struct device_attribute *ipr_dev_attrs[] = {
3470 &ipr_adapter_handle_attr,
3471 NULL,
3472};
3473
3474/**
3475 * ipr_biosparam - Return the HSC mapping
3476 * @sdev: scsi device struct
3477 * @block_device: block device pointer
3478 * @capacity: capacity of the device
3479 * @parm: Array containing returned HSC values.
3480 *
3481 * This function generates the head/sector/cylinder (HSC) values that fdisk uses.
3482 * We want to make sure we return something that places partitions
3483 * on 4k boundaries for best performance with the IOA.
3484 *
3485 * Return value:
3486 * 0 on success
3487 **/
3488static int ipr_biosparam(struct scsi_device *sdev,
3489 struct block_device *block_device,
3490 sector_t capacity, int *parm)
3491{
3492 int heads, sectors;
3493 sector_t cylinders;
3494
3495 heads = 128;
3496 sectors = 32;
3497
3498 cylinders = capacity;
3499 sector_div(cylinders, (128 * 32));
3500
3501 /* return result */
3502 parm[0] = heads;
3503 parm[1] = sectors;
3504 parm[2] = cylinders;
3505
3506 return 0;
3507}
3508
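To make the alignment claim concrete: 128 heads x 32 sectors/track is 4096 sectors per cylinder, so any partition that a tool like fdisk places on a cylinder boundary starts 4k-aligned. A standalone sketch of the same arithmetic, with a made-up capacity:

#include <stdio.h>

int main(void)
{
	/* Made-up capacity in 512-byte sectors; any value works. */
	unsigned long long capacity = 143374744ULL;
	unsigned long long cylinders = capacity / (128 * 32);

	/* 128 heads * 32 sectors/track = 4096 sectors (2 MiB) per cylinder,
	 * so partitions placed on cylinder boundaries are 4k-aligned. */
	printf("C/H/S = %llu/128/32, %d sectors per cylinder\n",
	       cylinders, 128 * 32);
	return 0;
}
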
3509/**
3510 * ipr_find_starget - Find target based on bus/target.
3511 * @starget: scsi target struct
3512 *
3513 * Return value:
3514 * resource entry pointer if found / NULL if not found
3515 **/
3516static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3517{
3518 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3519 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3520 struct ipr_resource_entry *res;
3521
3522 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3523 if ((res->cfgte.res_addr.bus == starget->channel) &&
3524 (res->cfgte.res_addr.target == starget->id) &&
3525 (res->cfgte.res_addr.lun == 0)) {
3526 return res;
3527 }
3528 }
3529
3530 return NULL;
3531}
3532
3533static struct ata_port_info sata_port_info;
3534
3535/**
3536 * ipr_target_alloc - Prepare for commands to a SCSI target
3537 * @starget: scsi target struct
3538 *
3539 * If the device is a SATA device, this function allocates an
3540 * ATA port with libata, else it does nothing.
3541 *
3542 * Return value:
3543 * 0 on success / non-0 on failure
3544 **/
3545static int ipr_target_alloc(struct scsi_target *starget)
3546{
3547 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3548 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3549 struct ipr_sata_port *sata_port;
3550 struct ata_port *ap;
3551 struct ipr_resource_entry *res;
3552 unsigned long lock_flags;
3553
3554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3555 res = ipr_find_starget(starget);
3556 starget->hostdata = NULL;
3557
3558 if (res && ipr_is_gata(res)) {
3559 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3560 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3561 if (!sata_port)
3562 return -ENOMEM;
3563
3564 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3565 if (ap) {
3566 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3567 sata_port->ioa_cfg = ioa_cfg;
3568 sata_port->ap = ap;
3569 sata_port->res = res;
3570
3571 res->sata_port = sata_port;
3572 ap->private_data = sata_port;
3573 starget->hostdata = sata_port;
3574 } else {
3575 kfree(sata_port);
3576 return -ENOMEM;
3577 }
3578 }
3579 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3580
3581 return 0;
3582}
3583
3584/**
3585 * ipr_target_destroy - Destroy a SCSI target
3586 * @starget: scsi target struct
3587 *
3588 * If the device was a SATA device, this function frees the libata
3589 * ATA port, else it does nothing.
3590 *
3591 **/
3592static void ipr_target_destroy(struct scsi_target *starget)
3593{
3594 struct ipr_sata_port *sata_port = starget->hostdata;
3595
3596 if (sata_port) {
3597 starget->hostdata = NULL;
3598 ata_sas_port_destroy(sata_port->ap);
3599 kfree(sata_port);
3600 }
3601}
3602
3603/**
3604 * ipr_find_sdev - Find device based on bus/target/lun.
3605 * @sdev: scsi device struct
3606 *
3607 * Return value:
3608 * resource entry pointer if found / NULL if not found
3609 **/
3610static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3611{
3612 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3613 struct ipr_resource_entry *res;
3614
3615 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3616 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3617 (res->cfgte.res_addr.target == sdev->id) &&
3618 (res->cfgte.res_addr.lun == sdev->lun))
3619 return res;
3620 }
3621
3622 return NULL;
3623}
3624
3625/**
3626 * ipr_slave_destroy - Unconfigure a SCSI device
3627 * @sdev: scsi device struct
3628 *
3629 * Return value:
3630 * nothing
3631 **/
3632static void ipr_slave_destroy(struct scsi_device *sdev)
3633{
3634 struct ipr_resource_entry *res;
3635 struct ipr_ioa_cfg *ioa_cfg;
3636 unsigned long lock_flags = 0;
3637
3638 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3639
3640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3641 res = (struct ipr_resource_entry *) sdev->hostdata;
3642 if (res) {
3643 if (res->sata_port)
3644 ata_port_disable(res->sata_port->ap);
3645 sdev->hostdata = NULL;
3646 res->sdev = NULL;
3647 res->sata_port = NULL;
3648 }
3649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3650}
3651
3652/**
3653 * ipr_slave_configure - Configure a SCSI device
3654 * @sdev: scsi device struct
3655 *
3656 * This function configures the specified scsi device.
3657 *
3658 * Return value:
3659 * 0 on success
3660 **/
3661static int ipr_slave_configure(struct scsi_device *sdev)
3662{
3663 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3664 struct ipr_resource_entry *res;
3665 unsigned long lock_flags = 0;
3666
3667 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3668 res = sdev->hostdata;
3669 if (res) {
3670 if (ipr_is_af_dasd_device(res))
3671 sdev->type = TYPE_RAID;
3672 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3673 sdev->scsi_level = 4;
3674 sdev->no_uld_attach = 1;
3675 }
3676 if (ipr_is_vset_device(res)) {
3677 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3678 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3679 }
3680 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3681 sdev->allow_restart = 1;
3682 if (ipr_is_gata(res) && res->sata_port) {
3683 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3684 ata_sas_slave_configure(sdev, res->sata_port->ap);
3685 } else {
3686 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3687 }
3688 }
3689 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3690 return 0;
3691}
3692
3693/**
3694 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3695 * @sdev: scsi device struct
3696 *
3697 * This function initializes an ATA port so that future commands
3698 * sent through queuecommand will work.
3699 *
3700 * Return value:
3701 * 0 on success
3702 **/
3703static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3704{
3705 struct ipr_sata_port *sata_port = NULL;
3706 int rc = -ENXIO;
3707
3708 ENTER;
3709 if (sdev->sdev_target)
3710 sata_port = sdev->sdev_target->hostdata;
3711 if (sata_port)
3712 rc = ata_sas_port_init(sata_port->ap);
3713 if (rc)
3714 ipr_slave_destroy(sdev);
3715
3716 LEAVE;
3717 return rc;
3718}
3719
3720/**
3721 * ipr_slave_alloc - Prepare for commands to a device.
3722 * @sdev: scsi device struct
3723 *
3724 * This function saves a pointer to the resource entry
3725 * in the scsi device struct if the device exists. We
3726 * can then use this pointer in ipr_queuecommand when
3727 * handling new commands.
3728 *
3729 * Return value:
3730 * 0 on success / -ENXIO if device does not exist
3731 **/
3732static int ipr_slave_alloc(struct scsi_device *sdev)
3733{
3734 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3735 struct ipr_resource_entry *res;
3736 unsigned long lock_flags;
3737 int rc = -ENXIO;
3738
3739 sdev->hostdata = NULL;
3740
3741 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3742
3743 res = ipr_find_sdev(sdev);
3744 if (res) {
3745 res->sdev = sdev;
3746 res->add_to_ml = 0;
3747 res->in_erp = 0;
3748 sdev->hostdata = res;
3749 if (!ipr_is_naca_model(res))
3750 res->needs_sync_complete = 1;
3751 rc = 0;
3752 if (ipr_is_gata(res)) {
3753 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3754 return ipr_ata_slave_alloc(sdev);
3755 }
3756 }
3757
3758 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3759
3760 return rc;
3761}
3762
3763/**
3764 * ipr_eh_host_reset - Reset the host adapter
3765 * @scsi_cmd: scsi command struct
3766 *
3767 * Return value:
3768 * SUCCESS / FAILED
3769 **/
3770static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3771{
3772 struct ipr_ioa_cfg *ioa_cfg;
3773 int rc;
3774
3775 ENTER;
3776 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3777
3778 dev_err(&ioa_cfg->pdev->dev,
3779 "Adapter being reset as a result of error recovery.\n");
3780
3781 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3782 ioa_cfg->sdt_state = GET_DUMP;
3783
3784 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3785
3786 LEAVE;
3787 return rc;
3788}
3789
3790static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3791{
3792 int rc;
3793
3794 spin_lock_irq(cmd->device->host->host_lock);
3795 rc = __ipr_eh_host_reset(cmd);
3796 spin_unlock_irq(cmd->device->host->host_lock);
3797
3798 return rc;
3799}
3800
3801/**
3802 * ipr_device_reset - Reset the device
3803 * @ioa_cfg: ioa config struct
3804 * @res: resource entry struct
3805 *
3806 * This function issues a device reset to the affected device.
3807 * If the device is a SCSI device, a LUN reset will be sent
3808 * to the device first. If that does not work, a target reset
3809 * will be sent. If the device is a SATA device, a PHY reset will
3810 * be sent.
3811 *
3812 * Return value:
3813 * 0 on success / non-zero on failure
3814 **/
3815static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3816 struct ipr_resource_entry *res)
3817{
3818 struct ipr_cmnd *ipr_cmd;
3819 struct ipr_ioarcb *ioarcb;
3820 struct ipr_cmd_pkt *cmd_pkt;
3821 struct ipr_ioarcb_ata_regs *regs;
3822 u32 ioasc;
3823
3824 ENTER;
3825 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3826 ioarcb = &ipr_cmd->ioarcb;
3827 cmd_pkt = &ioarcb->cmd_pkt;
3828 regs = &ioarcb->add_data.u.regs;
3829
3830 ioarcb->res_handle = res->cfgte.res_handle;
3831 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3832 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3833 if (ipr_is_gata(res)) {
3834 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3835 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3836 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3837 }
3838
3839 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3840 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3841 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3842 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3843 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3844 sizeof(struct ipr_ioasa_gata));
3845
3846 LEAVE;
3847 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3848}
3849
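The return statement above keys off the sense key extracted from the 32-bit IOASC. A self-contained sketch of that style of field decode follows; the bit layout is an assumption inferred from how the IPR_IOASC_SENSE_* macros are used in this file, not quoted from ipr.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout: sense key in bits 31:24, additional sense
 * code in 23:16, qualifier in 15:8. */
static uint8_t ioasc_sense_key(uint32_t ioasc)  { return ioasc >> 24; }
static uint8_t ioasc_sense_code(uint32_t ioasc) { return (ioasc >> 16) & 0xff; }
static uint8_t ioasc_sense_qual(uint32_t ioasc) { return (ioasc >> 8) & 0xff; }

int main(void)
{
	/* Illustrative IOASC: sense key 5 (ILLEGAL REQUEST), ASC 0x24. */
	uint32_t ioasc = 0x05240000;

	printf("key=%X asc=%02X ascq=%02X -> %s\n",
	       ioasc_sense_key(ioasc), ioasc_sense_code(ioasc),
	       ioasc_sense_qual(ioasc),
	       ioasc_sense_key(ioasc) ? "-EIO" : "0");
	return 0;
}
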
3850/**
3851 * ipr_sata_reset - Reset the SATA port
3852 * @ap: SATA port to reset
3853 * @classes: class of the attached device
3854 *
3855 * This function issues a SATA phy reset to the affected ATA port.
3856 *
3857 * Return value:
3858 * 0 on success / non-zero on failure
3859 **/
3860static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
3861 unsigned long deadline)
3862{
3863 struct ipr_sata_port *sata_port = ap->private_data;
3864 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3865 struct ipr_resource_entry *res;
3866 unsigned long lock_flags = 0;
3867 int rc = -ENXIO;
3868
3869 ENTER;
3870 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3871 while(ioa_cfg->in_reset_reload) {
3872 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3873 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3874 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3875 }
3876
3877 res = sata_port->res;
3878 if (res) {
3879 rc = ipr_device_reset(ioa_cfg, res);
3880 switch(res->cfgte.proto) {
3881 case IPR_PROTO_SATA:
3882 case IPR_PROTO_SAS_STP:
3883 *classes = ATA_DEV_ATA;
3884 break;
3885 case IPR_PROTO_SATA_ATAPI:
3886 case IPR_PROTO_SAS_STP_ATAPI:
3887 *classes = ATA_DEV_ATAPI;
3888 break;
3889 default:
3890 *classes = ATA_DEV_UNKNOWN;
3891 break;
3892 };
3893 }
3894
3895 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3896 LEAVE;
3897 return rc;
3898}
3899
3900/**
3901 * ipr_eh_dev_reset - Reset the device
3902 * @scsi_cmd: scsi command struct
3903 *
3904 * This function issues a device reset to the affected device.
3905 * A LUN reset will be sent to the device first. If that does
3906 * not work, a target reset will be sent.
3907 *
3908 * Return value:
3909 * SUCCESS / FAILED
3910 **/
3911static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3912{
3913 struct ipr_cmnd *ipr_cmd;
3914 struct ipr_ioa_cfg *ioa_cfg;
3915 struct ipr_resource_entry *res;
3916 struct ata_port *ap;
3917 int rc = 0;
3918
3919 ENTER;
3920 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3921 res = scsi_cmd->device->hostdata;
3922
3923 if (!res)
3924 return FAILED;
3925
3926 /*
3927 * If we are currently going through reset/reload, return failed. This will force the
3928 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3929 * reset to complete
3930 */
3931 if (ioa_cfg->in_reset_reload)
3932 return FAILED;
3933 if (ioa_cfg->ioa_is_dead)
3934 return FAILED;
3935
3936 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3937 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3938 if (ipr_cmd->scsi_cmd)
3939 ipr_cmd->done = ipr_scsi_eh_done;
3940 if (ipr_cmd->qc)
3941 ipr_cmd->done = ipr_sata_eh_done;
3942 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3943 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3944 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3945 }
3946 }
3947 }
3948
3949 res->resetting_device = 1;
3950 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3951
3952 if (ipr_is_gata(res) && res->sata_port) {
3953 ap = res->sata_port->ap;
3954 spin_unlock_irq(scsi_cmd->device->host->host_lock);
3955 ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3956 spin_lock_irq(scsi_cmd->device->host->host_lock);
3957 } else
3958 rc = ipr_device_reset(ioa_cfg, res);
3959 res->resetting_device = 0;
3960
3961 LEAVE;
3962 return (rc ? FAILED : SUCCESS);
3963}
3964
3965static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3966{
3967 int rc;
3968
3969 spin_lock_irq(cmd->device->host->host_lock);
3970 rc = __ipr_eh_dev_reset(cmd);
3971 spin_unlock_irq(cmd->device->host->host_lock);
3972
3973 return rc;
3974}
3975
3976/**
3977 * ipr_bus_reset_done - Op done function for bus reset.
3978 * @ipr_cmd: ipr command struct
3979 *
3980 * This function is the op done function for a bus reset
3981 *
3982 * Return value:
3983 * none
3984 **/
3985static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3986{
3987 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3988 struct ipr_resource_entry *res;
3989
3990 ENTER;
3991 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3992 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3993 sizeof(res->cfgte.res_handle))) {
3994 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3995 break;
3996 }
3997 }
3998
3999 /*
4000 * If abort has not completed, indicate the reset has, else call the
4001 * abort's done function to wake the sleeping eh thread
4002 */
4003 if (ipr_cmd->sibling->sibling)
4004 ipr_cmd->sibling->sibling = NULL;
4005 else
4006 ipr_cmd->sibling->done(ipr_cmd->sibling);
4007
4008 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4009 LEAVE;
4010}
4011
4012/**
4013 * ipr_abort_timeout - An abort task has timed out
4014 * @ipr_cmd: ipr command struct
4015 *
4016 * This function handles when an abort task times out. If this
4017 * happens we issue a bus reset since we have resources tied
4018 * up that must be freed before returning to the midlayer.
4019 *
4020 * Return value:
4021 * none
4022 **/
4023static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4024{
4025 struct ipr_cmnd *reset_cmd;
4026 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4027 struct ipr_cmd_pkt *cmd_pkt;
4028 unsigned long lock_flags = 0;
4029
4030 ENTER;
4031 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4032 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4033 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4034 return;
4035 }
4036
4037 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4038 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4039 ipr_cmd->sibling = reset_cmd;
4040 reset_cmd->sibling = ipr_cmd;
4041 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4042 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4043 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4044 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4045 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4046
4047 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4049 LEAVE;
4050}
4051
4052/**
4053 * ipr_cancel_op - Cancel specified op
4054 * @scsi_cmd: scsi command struct
4055 *
4056 * This function cancels specified op.
4057 *
4058 * Return value:
4059 * SUCCESS / FAILED
4060 **/
4061static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4062{
4063 struct ipr_cmnd *ipr_cmd;
4064 struct ipr_ioa_cfg *ioa_cfg;
4065 struct ipr_resource_entry *res;
4066 struct ipr_cmd_pkt *cmd_pkt;
4067 u32 ioasc;
4068 int op_found = 0;
4069
4070 ENTER;
4071 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4072 res = scsi_cmd->device->hostdata;
4073
4074 /* If we are currently going through reset/reload, return failed.
4075 * This will force the mid-layer to call ipr_eh_host_reset,
4076 * which will then go to sleep and wait for the reset to complete
4077 */
4078 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4079 return FAILED;
4080 if (!res || !ipr_is_gscsi(res))
4081 return FAILED;
4082
4083 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4084 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4085 ipr_cmd->done = ipr_scsi_eh_done;
4086 op_found = 1;
4087 break;
4088 }
4089 }
4090
4091 if (!op_found)
4092 return SUCCESS;
4093
4094 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4095 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4096 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4097 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4098 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4099 ipr_cmd->u.sdev = scsi_cmd->device;
4100
4101 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4102 scsi_cmd->cmnd[0]);
4103 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4104 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4105
4106 /*
4107 * If the abort task timed out and we sent a bus reset, we will get
4108 * one of the following responses to the abort
4109 */
4110 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4111 ioasc = 0;
4112 ipr_trace;
4113 }
4114
4115 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4116 if (!ipr_is_naca_model(res))
4117 res->needs_sync_complete = 1;
4118
4119 LEAVE;
4120 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4121}
4122
4123/**
4124 * ipr_eh_abort - Abort a single op
4125 * @scsi_cmd: scsi command struct
4126 *
4127 * Return value:
4128 * SUCCESS / FAILED
4129 **/
4130static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4131{
4132 unsigned long flags;
4133 int rc;
4134
4135 ENTER;
4136
4137 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4138 rc = ipr_cancel_op(scsi_cmd);
4139 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4140
4141 LEAVE;
4142 return rc;
4143}
4144
4145/**
4146 * ipr_handle_other_interrupt - Handle "other" interrupts
4147 * @ioa_cfg: ioa config struct
4148 * @int_reg: interrupt register
4149 *
4150 * Return value:
4151 * IRQ_NONE / IRQ_HANDLED
4152 **/
4153static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4154 volatile u32 int_reg)
4155{
4156 irqreturn_t rc = IRQ_HANDLED;
4157
4158 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4159 /* Mask the interrupt */
4160 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4161
4162 /* Clear the interrupt */
4163 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4164 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4165
4166 list_del(&ioa_cfg->reset_cmd->queue);
4167 del_timer(&ioa_cfg->reset_cmd->timer);
4168 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4169 } else {
4170 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4171 ioa_cfg->ioa_unit_checked = 1;
4172 else
4173 dev_err(&ioa_cfg->pdev->dev,
4174 "Permanent IOA failure. 0x%08X\n", int_reg);
4175
4176 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4177 ioa_cfg->sdt_state = GET_DUMP;
4178
4179 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4180 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4181 }
4182
4183 return rc;
4184}
4185
4186/**
4187 * ipr_isr - Interrupt service routine
4188 * @irq: irq number
4189 * @devp: pointer to ioa config struct
4190 *
4191 * Return value:
4192 * IRQ_NONE / IRQ_HANDLED
4193 **/
4194static irqreturn_t ipr_isr(int irq, void *devp)
4195{
4196 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4197 unsigned long lock_flags = 0;
4198 volatile u32 int_reg, int_mask_reg;
4199 u32 ioasc;
4200 u16 cmd_index;
4201 struct ipr_cmnd *ipr_cmd;
4202 irqreturn_t rc = IRQ_NONE;
4203
4204 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4205
4206 /* If interrupts are disabled, ignore the interrupt */
4207 if (!ioa_cfg->allow_interrupts) {
4208 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4209 return IRQ_NONE;
4210 }
4211
4212 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4213 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4214
4215 /* If an interrupt on the adapter did not occur, ignore it */
4216 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4217 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4218 return IRQ_NONE;
4219 }
4220
4221 while (1) {
4222 ipr_cmd = NULL;
4223
4224 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4225 ioa_cfg->toggle_bit) {
4226
4227 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4228 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4229
4230 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4231 ioa_cfg->errors_logged++;
4232 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4233
4234 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4235 ioa_cfg->sdt_state = GET_DUMP;
4236
4237 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4239 return IRQ_HANDLED;
4240 }
4241
4242 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4243
4244 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4245
4246 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4247
4248 list_del(&ipr_cmd->queue);
4249 del_timer(&ipr_cmd->timer);
4250 ipr_cmd->done(ipr_cmd);
4251
4252 rc = IRQ_HANDLED;
4253
4254 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4255 ioa_cfg->hrrq_curr++;
4256 } else {
4257 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4258 ioa_cfg->toggle_bit ^= 1u;
4259 }
4260 }
4261
4262 if (ipr_cmd != NULL) {
4263 /* Clear the PCI interrupt */
4264 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4265 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4266 } else
4267 break;
4268 }
4269
4270 if (unlikely(rc == IRQ_NONE))
4271 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4272
4273 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4274 return rc;
4275}
4276
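The inner loop of ipr_isr consumes the host response queue with a toggle bit: an entry is valid only while its toggle bit matches the driver's expected value, and the expected value flips on every wraparound, so entries left over from the previous pass are never re-consumed. A minimal standalone sketch of the technique, with illustrative names rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4u

/* One response queue entry: bit 0 is the producer's toggle bit,
 * the upper bits stand in for the response handle. */
struct hrrq_sketch {
	uint32_t entries[RING_SIZE];
	unsigned int curr;	/* consumer index */
	uint32_t toggle;	/* toggle value expected for a valid entry */
};

/* Consume every entry whose toggle bit matches; flip the expected
 * value on wraparound so last pass's entries are seen as stale. */
static void consume(struct hrrq_sketch *q)
{
	while ((q->entries[q->curr] & 1u) == q->toggle) {
		printf("handle %u\n", (unsigned)(q->entries[q->curr] >> 1));
		if (q->curr + 1 < RING_SIZE) {
			q->curr++;
		} else {
			q->curr = 0;
			q->toggle ^= 1u;
		}
	}
}

int main(void)
{
	/* Producer wrote two entries (handles 7 and 9) with toggle bit 1. */
	struct hrrq_sketch q = { { (7u << 1) | 1u, (9u << 1) | 1u, 0, 0 }, 0, 1u };

	consume(&q);
	return 0;
}
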
4277/**
4278 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4279 * @ioa_cfg: ioa config struct
4280 * @ipr_cmd: ipr command struct
4281 *
4282 * Return value:
4283 * 0 on success / -1 on failure
4284 **/
4285static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4286 struct ipr_cmnd *ipr_cmd)
4287{
4288 int i;
4289 struct scatterlist *sglist;
4290 u32 length;
4291 u32 ioadl_flags = 0;
4292 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4293 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4294 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4295
4296 length = scsi_cmd->request_bufflen;
4297
4298 if (length == 0)
4299 return 0;
4300
4301 if (scsi_cmd->use_sg) {
4302 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4303 scsi_cmd->request_buffer,
4304 scsi_cmd->use_sg,
4305 scsi_cmd->sc_data_direction);
4306
4307 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4308 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4309 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4310 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4311 ioarcb->write_ioadl_len =
4312 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4313 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4314 ioadl_flags = IPR_IOADL_FLAGS_READ;
4315 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4316 ioarcb->read_ioadl_len =
4317 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4318 }
4319
4320 sglist = scsi_cmd->request_buffer;
4321
4322 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4323 ioadl = ioarcb->add_data.u.ioadl;
4324 ioarcb->write_ioadl_addr =
4325 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4326 offsetof(struct ipr_ioarcb, add_data));
4327 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4328 }
4329
4330 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4331 ioadl[i].flags_and_data_len =
4332 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4333 ioadl[i].address =
4334 cpu_to_be32(sg_dma_address(&sglist[i]));
4335 }
4336
4337 if (likely(ipr_cmd->dma_use_sg)) {
4338 ioadl[i-1].flags_and_data_len |=
4339 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4340 return 0;
4341 } else
4342 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4343 } else {
4344 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4345 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4346 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4347 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4348 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4349 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4350 ioadl_flags = IPR_IOADL_FLAGS_READ;
4351 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4352 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4353 }
4354
4355 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4356 scsi_cmd->request_buffer, length,
4357 scsi_cmd->sc_data_direction);
4358
4359 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4360 ioadl = ioarcb->add_data.u.ioadl;
4361 ioarcb->write_ioadl_addr =
4362 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4363 offsetof(struct ipr_ioarcb, add_data));
4364 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4365 ipr_cmd->dma_use_sg = 1;
4366 ioadl[0].flags_and_data_len =
4367 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4368 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4369 return 0;
4370 } else
4371 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4372 }
4373
4374 return -1;
4375}
4376
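For reference, a standalone sketch of the descriptor format this function fills in: each IOADL element pairs a flags-plus-byte-count word with a DMA address word, and the final element carries a LAST flag. The flag values below are illustrative, and the big-endian conversion the real driver applies via cpu_to_be32() is noted but omitted:

#include <stdint.h>
#include <stdio.h>

#define SG_FLAGS_READ 0x48000000u	/* illustrative flag values, not the IOA's */
#define SG_FLAGS_LAST 0x80000000u

struct sg_desc {
	uint32_t flags_and_len;	/* flags in the high bits, byte count below */
	uint32_t addr;		/* DMA address of the segment */
};

/* Fill n descriptors and flag the final one LAST, mirroring the loop
 * above (the real driver also stores both words big-endian). */
static void build_sg_list(struct sg_desc *d, const uint32_t *addrs,
			  const uint32_t *lens, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		d[i].flags_and_len = SG_FLAGS_READ | lens[i];
		d[i].addr = addrs[i];
	}
	if (n)
		d[n - 1].flags_and_len |= SG_FLAGS_LAST;
}

int main(void)
{
	struct sg_desc d[2];
	const uint32_t addrs[2] = { 0x1000, 0x2000 };
	const uint32_t lens[2] = { 4096, 512 };

	build_sg_list(d, addrs, lens, 2);
	printf("last flag set: %u\n", (unsigned)(d[1].flags_and_len >> 31));
	return 0;
}
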
4377/**
4378 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4379 * @scsi_cmd: scsi command struct
4380 *
4381 * Return value:
4382 * task attributes
4383 **/
4384static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4385{
4386 u8 tag[2];
4387 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4388
4389 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4390 switch (tag[0]) {
4391 case MSG_SIMPLE_TAG:
4392 rc = IPR_FLAGS_LO_SIMPLE_TASK;
4393 break;
4394 case MSG_HEAD_TAG:
4395 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4396 break;
4397 case MSG_ORDERED_TAG:
4398 rc = IPR_FLAGS_LO_ORDERED_TASK;
4399 break;
4400 };
4401 }
4402
4403 return rc;
4404}
4405
4406/**
4407 * ipr_erp_done - Process completion of ERP for a device
4408 * @ipr_cmd: ipr command struct
4409 *
4410 * This function copies the sense buffer into the scsi_cmd
4411 * struct and pushes the scsi_done function.
4412 *
4413 * Return value:
4414 * nothing
4415 **/
4416static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4417{
4418 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4419 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4421 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4422
4423 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4424 scsi_cmd->result |= (DID_ERROR << 16);
4425 scmd_printk(KERN_ERR, scsi_cmd,
4426 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4427 } else {
4428 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4429 SCSI_SENSE_BUFFERSIZE);
4430 }
4431
4432 if (res) {
4433 if (!ipr_is_naca_model(res))
4434 res->needs_sync_complete = 1;
4435 res->in_erp = 0;
4436 }
4437 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4438 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4439 scsi_cmd->scsi_done(scsi_cmd);
4440}
4441
4442/**
4443 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4444 * @ipr_cmd: ipr command struct
4445 *
4446 * Return value:
4447 * none
4448 **/
4449static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4450{
4451 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4452 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4453 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4454
4455 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4456 ioarcb->write_data_transfer_length = 0;
4457 ioarcb->read_data_transfer_length = 0;
4458 ioarcb->write_ioadl_len = 0;
4459 ioarcb->read_ioadl_len = 0;
4460 ioasa->ioasc = 0;
4461 ioasa->residual_data_len = 0;
4462 ioarcb->write_ioadl_addr =
4463 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4464 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4465}
4466
4467/**
4468 * ipr_erp_request_sense - Send request sense to a device
4469 * @ipr_cmd: ipr command struct
4470 *
4471 * This function sends a request sense to a device as a result
4472 * of a check condition.
4473 *
4474 * Return value:
4475 * nothing
4476 **/
4477static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4478{
4479 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4480 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4481
4482 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4483 ipr_erp_done(ipr_cmd);
4484 return;
4485 }
4486
4487 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4488
4489 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4490 cmd_pkt->cdb[0] = REQUEST_SENSE;
4491 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4492 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4493 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4494 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4495
4496 ipr_cmd->ioadl[0].flags_and_data_len =
4497 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4498 ipr_cmd->ioadl[0].address =
4499 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4500
4501 ipr_cmd->ioarcb.read_ioadl_len =
4502 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4503 ipr_cmd->ioarcb.read_data_transfer_length =
4504 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4505
4506 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4507 IPR_REQUEST_SENSE_TIMEOUT * 2);
4508}
4509
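The CDB assembled above is the standard 6-byte REQUEST SENSE. A tiny standalone sketch of the same construction; the 96-byte allocation length is an assumption matching SCSI_SENSE_BUFFERSIZE in kernels of this vintage:

#include <stdint.h>
#include <string.h>

/* Build a 6-byte REQUEST SENSE CDB: opcode 0x03, allocation
 * length in byte 4, all other bytes zero. */
static void build_request_sense(uint8_t cdb[6], uint8_t alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = 0x03;		/* REQUEST SENSE */
	cdb[4] = alloc_len;	/* bytes of sense data requested */
}

int main(void)
{
	uint8_t cdb[6];

	build_request_sense(cdb, 96);	/* assumed sense buffer size */
	return cdb[0] == 0x03 ? 0 : 1;
}
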
4510/**
4511 * ipr_erp_cancel_all - Send cancel all to a device
4512 * @ipr_cmd: ipr command struct
4513 *
4514 * This function sends a cancel all to a device to clear the
4515 * queue. If we are running TCQ on the device, QERR is set to 1,
4516 * which means all outstanding ops have been dropped on the floor.
4517 * Cancel all will return them to us.
4518 *
4519 * Return value:
4520 * nothing
4521 **/
4522static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4523{
4524 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4525 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4526 struct ipr_cmd_pkt *cmd_pkt;
4527
4528 res->in_erp = 1;
4529
4530 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4531
4532 if (!scsi_get_tag_type(scsi_cmd->device)) {
4533 ipr_erp_request_sense(ipr_cmd);
4534 return;
4535 }
4536
4537 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4538 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4539 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4540
4541 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4542 IPR_CANCEL_ALL_TIMEOUT);
4543}
4544
4545/**
4546 * ipr_dump_ioasa - Dump contents of IOASA
4547 * @ioa_cfg: ioa config struct
4548 * @ipr_cmd: ipr command struct
4549 * @res: resource entry struct
4550 *
4551 * This function is invoked by the interrupt handler when ops
4552 * fail. It will log the IOASA if appropriate. Only called
4553 * for GPDD ops.
4554 *
4555 * Return value:
4556 * none
4557 **/
4558static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4559 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4560{
4561 int i;
4562 u16 data_len;
4563 u32 ioasc, fd_ioasc;
4564 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4565 __be32 *ioasa_data = (__be32 *)ioasa;
4566 int error_index;
4567
4568 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4569 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4570
4571 if (0 == ioasc)
4572 return;
4573
4574 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4575 return;
4576
4577 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4578 error_index = ipr_get_error(fd_ioasc);
4579 else
4580 error_index = ipr_get_error(ioasc);
4581
4582 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4583 /* Don't log an error if the IOA already logged one */
4584 if (ioasa->ilid != 0)
4585 return;
4586
4587 if (!ipr_is_gscsi(res))
4588 return;
4589
4590 if (ipr_error_table[error_index].log_ioasa == 0)
4591 return;
4592 }
4593
4594 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4595
4596 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4597 data_len = sizeof(struct ipr_ioasa);
4598 else
4599 data_len = be16_to_cpu(ioasa->ret_stat_len);
4600
4601 ipr_err("IOASA Dump:\n");
4602
4603 for (i = 0; i < data_len / 4; i += 4) {
4604 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4605 be32_to_cpu(ioasa_data[i]),
4606 be32_to_cpu(ioasa_data[i+1]),
4607 be32_to_cpu(ioasa_data[i+2]),
4608 be32_to_cpu(ioasa_data[i+3]));
4609 }
4610}
4611
4612/**
4613 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4614 * @ipr_cmd: ipr command struct
4616 *
4617 * Return value:
4618 * none
4619 **/
4620static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4621{
4622 u32 failing_lba;
4623 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4624 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4625 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4626 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4627
4628 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4629
4630 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4631 return;
4632
4633 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4634
4635 if (ipr_is_vset_device(res) &&
4636 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4637 ioasa->u.vset.failing_lba_hi != 0) {
4638 sense_buf[0] = 0x72;
4639 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4640 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4641 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4642
4643 sense_buf[7] = 12;
4644 sense_buf[8] = 0;
4645 sense_buf[9] = 0x0A;
4646 sense_buf[10] = 0x80;
4647
4648 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4649
4650 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4651 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4652 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4653 sense_buf[15] = failing_lba & 0x000000ff;
4654
4655 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4656
4657 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4658 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4659 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4660 sense_buf[19] = failing_lba & 0x000000ff;
4661 } else {
4662 sense_buf[0] = 0x70;
4663 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4664 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4665 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4666
4667 /* Illegal request */
4668 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4669 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4670 sense_buf[7] = 10; /* additional length */
4671
4672 /* IOARCB was in error */
4673 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4674 sense_buf[15] = 0xC0;
4675 else /* Parameter data was invalid */
4676 sense_buf[15] = 0x80;
4677
4678 sense_buf[16] =
4679 ((IPR_FIELD_POINTER_MASK &
4680 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4681 sense_buf[17] =
4682 (IPR_FIELD_POINTER_MASK &
4683 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4684 } else {
4685 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4686 if (ipr_is_vset_device(res))
4687 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4688 else
4689 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4690
4691 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4692 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4693 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4694 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4695 sense_buf[6] = failing_lba & 0x000000ff;
4696 }
4697
4698 sense_buf[7] = 6; /* additional length */
4699 }
4700 }
4701}
4702
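A short sketch of how a consumer could recover the failing LBA from the two sense layouts generated above. The descriptor-format offsets assume the information descriptor comes first, which is how this function builds the buffer:

#include <stdint.h>
#include <stdio.h>

/* Descriptor format (0x72), as built here, carries a 64-bit LBA in
 * bytes 12-19; fixed format (0x70) carries a 32-bit LBA in bytes 3-6
 * when the VALID bit (bit 7 of byte 0) is set. */
static uint64_t sense_failing_lba(const uint8_t *s)
{
	int i;
	uint64_t lba = 0;

	if ((s[0] & 0x7f) == 0x72) {		/* descriptor format */
		for (i = 12; i < 20; i++)
			lba = (lba << 8) | s[i];
		return lba;
	}
	if (s[0] & 0x80) {			/* fixed format, VALID set */
		for (i = 3; i < 7; i++)
			lba = (lba << 8) | s[i];
	}
	return lba;
}

int main(void)
{
	/* Illustrative fixed-format sense: VALID set, LBA 0x123456. */
	uint8_t fixed[18] = { 0xf0, 0, 0x03, 0x00, 0x12, 0x34, 0x56 };

	printf("failing LBA: 0x%llx\n",
	       (unsigned long long)sense_failing_lba(fixed));
	return 0;
}
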
4703/**
4704 * ipr_get_autosense - Copy autosense data to sense buffer
4705 * @ipr_cmd: ipr command struct
4706 *
4707 * This function copies the autosense buffer to the buffer
4708 * in the scsi_cmd, if there is autosense available.
4709 *
4710 * Return value:
4711 * 1 if autosense was available / 0 if not
4712 **/
4713static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4714{
4715 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4716
4717 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4718 return 0;
4719
4720 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4721 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4722 SCSI_SENSE_BUFFERSIZE));
4723 return 1;
4724}
4725
4726/**
4727 * ipr_erp_start - Process an error response for a SCSI op
4728 * @ioa_cfg: ioa config struct
4729 * @ipr_cmd: ipr command struct
4730 *
4731 * This function determines whether or not to initiate ERP
4732 * on the affected device.
4733 *
4734 * Return value:
4735 * nothing
4736 **/
4737static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4738 struct ipr_cmnd *ipr_cmd)
4739{
4740 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4741 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4742 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4743 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4744
4745 if (!res) {
4746 ipr_scsi_eh_done(ipr_cmd);
4747 return;
4748 }
4749
4750 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4751 ipr_gen_sense(ipr_cmd);
4752
4753 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4754
4755 switch (masked_ioasc) {
4756 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4757 if (ipr_is_naca_model(res))
4758 scsi_cmd->result |= (DID_ABORT << 16);
4759 else
4760 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4761 break;
4762 case IPR_IOASC_IR_RESOURCE_HANDLE:
4763 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4764 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4765 break;
4766 case IPR_IOASC_HW_SEL_TIMEOUT:
4767 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4768 if (!ipr_is_naca_model(res))
4769 res->needs_sync_complete = 1;
4770 break;
4771 case IPR_IOASC_SYNC_REQUIRED:
4772 if (!res->in_erp)
4773 res->needs_sync_complete = 1;
4774 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4775 break;
4776 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4777 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4778 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4779 break;
4780 case IPR_IOASC_BUS_WAS_RESET:
4781 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4782 /*
4783 * Report the bus reset and ask for a retry. The device
4784 * will give CC/UA the next command.
4785 */
4786 if (!res->resetting_device)
4787 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4788 scsi_cmd->result |= (DID_ERROR << 16);
4789 if (!ipr_is_naca_model(res))
4790 res->needs_sync_complete = 1;
4791 break;
4792 case IPR_IOASC_HW_DEV_BUS_STATUS:
4793 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4794 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4795 if (!ipr_get_autosense(ipr_cmd)) {
4796 if (!ipr_is_naca_model(res)) {
4797 ipr_erp_cancel_all(ipr_cmd);
4798 return;
4799 }
4800 }
4801 }
4802 if (!ipr_is_naca_model(res))
4803 res->needs_sync_complete = 1;
4804 break;
4805 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4806 break;
4807 default:
4808 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4809 scsi_cmd->result |= (DID_ERROR << 16);
4810 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4811 res->needs_sync_complete = 1;
4812 break;
4813 }
4814
4815 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4816 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4817 scsi_cmd->scsi_done(scsi_cmd);
4818}
4819
4820/**
4821 * ipr_scsi_done - mid-layer done function
4822 * @ipr_cmd: ipr command struct
4823 *
4824 * This function is invoked by the interrupt handler for
4825 * ops generated by the SCSI mid-layer
4826 *
4827 * Return value:
4828 * none
4829 **/
4830static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4831{
4832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4833 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4834 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4835
4836 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4837
4838 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4839 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4840 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4841 scsi_cmd->scsi_done(scsi_cmd);
4842 } else
4843 ipr_erp_start(ioa_cfg, ipr_cmd);
4844}
4845
4846/**
4847 * ipr_queuecommand - Queue a mid-layer request
4848 * @scsi_cmd: scsi command struct
4849 * @done: done function
4850 *
4851 * This function queues a request generated by the mid-layer.
4852 *
4853 * Return value:
4854 * 0 on success
4855 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4856 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4857 **/
4858static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4859 void (*done) (struct scsi_cmnd *))
4860{
4861 struct ipr_ioa_cfg *ioa_cfg;
4862 struct ipr_resource_entry *res;
4863 struct ipr_ioarcb *ioarcb;
4864 struct ipr_cmnd *ipr_cmd;
4865 int rc = 0;
4866
4867 scsi_cmd->scsi_done = done;
4868 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4869 res = scsi_cmd->device->hostdata;
4870 scsi_cmd->result = (DID_OK << 16);
4871
4872 /*
4873 * We are currently blocking all devices due to a host reset
4874 * We have told the host to stop giving us new requests, but
4875 * ERP ops don't count. FIXME
4876 */
4877 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4878 return SCSI_MLQUEUE_HOST_BUSY;
4879
4880 /*
4881 * FIXME - Create scsi_set_host_offline interface
4882 * and the ioa_is_dead check can be removed
4883 */
4884 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4885 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4886 scsi_cmd->result = (DID_NO_CONNECT << 16);
4887 scsi_cmd->scsi_done(scsi_cmd);
4888 return 0;
4889 }
4890
4891 if (ipr_is_gata(res) && res->sata_port)
4892 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4893
4894 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4895 ioarcb = &ipr_cmd->ioarcb;
4896 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4897
4898 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4899 ipr_cmd->scsi_cmd = scsi_cmd;
4900 ioarcb->res_handle = res->cfgte.res_handle;
4901 ipr_cmd->done = ipr_scsi_done;
4902 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4903
4904 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4905 if (scsi_cmd->underflow == 0)
4906 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4907
4908 if (res->needs_sync_complete) {
4909 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4910 res->needs_sync_complete = 0;
4911 }
4912
4913 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4914 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4915 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4916 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4917 }
4918
4919 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4920 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4921 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4922
4923 if (likely(rc == 0))
4924 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4925
4926 if (likely(rc == 0)) {
4927 mb();
4928 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4929 ioa_cfg->regs.ioarrin_reg);
4930 } else {
4931 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4932 return SCSI_MLQUEUE_HOST_BUSY;
4933 }
4934
4935 return 0;
4936}
4937
4938/**
4939 * ipr_ioctl - IOCTL handler
4940 * @sdev: scsi device struct
4941 * @cmd: IOCTL cmd
4942 * @arg: IOCTL arg
4943 *
4944 * Return value:
4945 * 0 on success / other on failure
4946 **/
4947static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4948{
4949 struct ipr_resource_entry *res;
4950
4951 res = (struct ipr_resource_entry *)sdev->hostdata;
4952 if (res && ipr_is_gata(res))
4953 return ata_scsi_ioctl(sdev, cmd, arg);
4954
4955 return -EINVAL;
4956}
4957
4958/**
4959 * ipr_ioa_info - Get information about the card/driver
4960 * @host: scsi host struct
4961 *
4962 * Return value:
4963 * pointer to buffer with description string
4964 **/
4965static const char * ipr_ioa_info(struct Scsi_Host *host)
4966{
4967 static char buffer[512];
4968 struct ipr_ioa_cfg *ioa_cfg;
4969 unsigned long lock_flags = 0;
4970
4971 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4972
4973 spin_lock_irqsave(host->host_lock, lock_flags);
4974 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4975 spin_unlock_irqrestore(host->host_lock, lock_flags);
4976
4977 return buffer;
4978}
4979
4980static struct scsi_host_template driver_template = {
4981 .module = THIS_MODULE,
4982 .name = "IPR",
4983 .info = ipr_ioa_info,
4984 .ioctl = ipr_ioctl,
4985 .queuecommand = ipr_queuecommand,
4986 .eh_abort_handler = ipr_eh_abort,
4987 .eh_device_reset_handler = ipr_eh_dev_reset,
4988 .eh_host_reset_handler = ipr_eh_host_reset,
4989 .slave_alloc = ipr_slave_alloc,
4990 .slave_configure = ipr_slave_configure,
4991 .slave_destroy = ipr_slave_destroy,
4992 .target_alloc = ipr_target_alloc,
4993 .target_destroy = ipr_target_destroy,
4994 .change_queue_depth = ipr_change_queue_depth,
4995 .change_queue_type = ipr_change_queue_type,
4996 .bios_param = ipr_biosparam,
4997 .can_queue = IPR_MAX_COMMANDS,
4998 .this_id = -1,
4999 .sg_tablesize = IPR_MAX_SGLIST,
5000 .max_sectors = IPR_IOA_MAX_SECTORS,
5001 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5002 .use_clustering = ENABLE_CLUSTERING,
5003 .shost_attrs = ipr_ioa_attrs,
5004 .sdev_attrs = ipr_dev_attrs,
5005 .proc_name = IPR_NAME
5006};
5007
5008/**
5009 * ipr_ata_phy_reset - libata phy_reset handler
5010 * @ap: ata port to reset
5011 *
5012 **/
5013static void ipr_ata_phy_reset(struct ata_port *ap)
5014{
5015 unsigned long flags;
5016 struct ipr_sata_port *sata_port = ap->private_data;
5017 struct ipr_resource_entry *res = sata_port->res;
5018 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5019 int rc;
5020
5021 ENTER;
5022 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5023 while(ioa_cfg->in_reset_reload) {
5024 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5025 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5026 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5027 }
5028
5029 if (!ioa_cfg->allow_cmds)
5030 goto out_unlock;
5031
5032 rc = ipr_device_reset(ioa_cfg, res);
5033
5034 if (rc) {
5035 ap->ops->port_disable(ap);
5036 goto out_unlock;
5037 }
5038
5039 switch(res->cfgte.proto) {
5040 case IPR_PROTO_SATA:
5041 case IPR_PROTO_SAS_STP:
5042 ap->device[0].class = ATA_DEV_ATA;
5043 break;
5044 case IPR_PROTO_SATA_ATAPI:
5045 case IPR_PROTO_SAS_STP_ATAPI:
5046 ap->device[0].class = ATA_DEV_ATAPI;
5047 break;
5048 default:
5049 ap->device[0].class = ATA_DEV_UNKNOWN;
5050 ap->ops->port_disable(ap);
5051 break;
5052 };
5053
5054out_unlock:
5055 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5056 LEAVE;
5057}
5058
5059/**
5060 * ipr_ata_post_internal - Cleanup after an internal command
5061 * @qc: ATA queued command
5062 *
5063 * Return value:
5064 * none
5065 **/
5066static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5067{
5068 struct ipr_sata_port *sata_port = qc->ap->private_data;
5069 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5070 struct ipr_cmnd *ipr_cmd;
5071 unsigned long flags;
5072
5073 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5074 while(ioa_cfg->in_reset_reload) {
5075 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5076 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5077 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5078 }
5079
5080 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5081 if (ipr_cmd->qc == qc) {
5082 ipr_device_reset(ioa_cfg, sata_port->res);
5083 break;
5084 }
5085 }
5086 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5087}
5088
5089/**
5090 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5091 * @ap: ATA port
5092 * @tf: destination ATA taskfile
5093 *
5094 * Return value:
5095 * none
5096 **/
5097static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5098{
5099 struct ipr_sata_port *sata_port = ap->private_data;
5100 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5101
5102 tf->feature = g->error;
5103 tf->nsect = g->nsect;
5104 tf->lbal = g->lbal;
5105 tf->lbam = g->lbam;
5106 tf->lbah = g->lbah;
5107 tf->device = g->device;
5108 tf->command = g->status;
5109 tf->hob_nsect = g->hob_nsect;
5110 tf->hob_lbal = g->hob_lbal;
5111 tf->hob_lbam = g->hob_lbam;
5112 tf->hob_lbah = g->hob_lbah;
5113 tf->ctl = g->alt_status;
5114}
5115
5116/**
5117 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5118 * @regs: destination
5119 * @tf: source ATA taskfile
5120 *
5121 * Return value:
5122 * none
5123 **/
5124static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5125 struct ata_taskfile *tf)
5126{
5127 regs->feature = tf->feature;
5128 regs->nsect = tf->nsect;
5129 regs->lbal = tf->lbal;
5130 regs->lbam = tf->lbam;
5131 regs->lbah = tf->lbah;
5132 regs->device = tf->device;
5133 regs->command = tf->command;
5134 regs->hob_feature = tf->hob_feature;
5135 regs->hob_nsect = tf->hob_nsect;
5136 regs->hob_lbal = tf->hob_lbal;
5137 regs->hob_lbam = tf->hob_lbam;
5138 regs->hob_lbah = tf->hob_lbah;
5139 regs->ctl = tf->ctl;
5140}
5141
5142/**
5143 * ipr_sata_done - done function for SATA commands
5144 * @ipr_cmd: ipr command struct
5145 *
5146 * This function is invoked by the interrupt handler for
5147 * ops generated by the SCSI mid-layer to SATA devices
5148 *
5149 * Return value:
5150 * none
5151 **/
5152static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5153{
5154 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5155 struct ata_queued_cmd *qc = ipr_cmd->qc;
5156 struct ipr_sata_port *sata_port = qc->ap->private_data;
5157 struct ipr_resource_entry *res = sata_port->res;
5158 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5159
5160 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5161 sizeof(struct ipr_ioasa_gata));
5162 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5163
5164 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5165 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5166 res->cfgte.res_addr.target);
5167
5168 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5169 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5170 else
5171 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5172 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5173 ata_qc_complete(qc);
5174}
5175
5176/**
5177 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5178 * @ipr_cmd: ipr command struct
5179 * @qc: ATA queued command
5180 *
5181 **/
5182static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5183 struct ata_queued_cmd *qc)
5184{
5185 u32 ioadl_flags = 0;
5186 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5187 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5188 int len = qc->nbytes + qc->pad_len;
5189 struct scatterlist *sg;
5190
5191 if (len == 0)
5192 return;
5193
5194 if (qc->dma_dir == DMA_TO_DEVICE) {
5195 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5196 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5197 ioarcb->write_data_transfer_length = cpu_to_be32(len);
5198 ioarcb->write_ioadl_len =
5199 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5200 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5201 ioadl_flags = IPR_IOADL_FLAGS_READ;
5202 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5203 ioarcb->read_ioadl_len =
5204 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5205 }
5206
5207 ata_for_each_sg(sg, qc) {
5208 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5209 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5210 if (ata_sg_is_last(sg, qc))
5211 ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5212 else
5213 ioadl++;
5214 }
5215}
5216
5217/**
5218 * ipr_qc_issue - Issue a SATA qc to a device
5219 * @qc: queued command
5220 *
5221 * Return value:
5222 * 0 if success
5223 **/
5224static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5225{
5226 struct ata_port *ap = qc->ap;
5227 struct ipr_sata_port *sata_port = ap->private_data;
5228 struct ipr_resource_entry *res = sata_port->res;
5229 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5230 struct ipr_cmnd *ipr_cmd;
5231 struct ipr_ioarcb *ioarcb;
5232 struct ipr_ioarcb_ata_regs *regs;
5233
5234 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5235 return AC_ERR_SYSTEM;
5236
5237 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5238 ioarcb = &ipr_cmd->ioarcb;
5239 regs = &ioarcb->add_data.u.regs;
5240
5241 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5242 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5243
5244 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5245 ipr_cmd->qc = qc;
5246 ipr_cmd->done = ipr_sata_done;
5247 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5248 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5249 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5250 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5251 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5252
5253 ipr_build_ata_ioadl(ipr_cmd, qc);
5254 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5255 ipr_copy_sata_tf(regs, &qc->tf);
5256 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5257 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5258
5259 switch (qc->tf.protocol) {
5260 case ATA_PROT_NODATA:
5261 case ATA_PROT_PIO:
5262 break;
5263
5264 case ATA_PROT_DMA:
5265 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5266 break;
5267
5268 case ATA_PROT_ATAPI:
5269 case ATA_PROT_ATAPI_NODATA:
5270 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5271 break;
5272
5273 case ATA_PROT_ATAPI_DMA:
5274 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5275 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5276 break;
5277
5278 default:
5279 WARN_ON(1);
5280 return AC_ERR_INVALID;
5281 }
5282
5283 mb();
5284 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5285 ioa_cfg->regs.ioarrin_reg);
5286 return 0;
5287}
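
/*
 * Illustrative aside, not part of the driver: the protocol switch above
 * reduces the ATA taskfile protocol to two orthogonal flag bits, "this
 * is a packet (ATAPI) command" and "the transfer uses DMA", and rejects
 * anything it does not recognize. A standalone sketch of that mapping
 * with hypothetical names:
 */
enum ex_prot { EX_NODATA, EX_PIO, EX_DMA, EX_ATAPI, EX_ATAPI_NODATA,
               EX_ATAPI_DMA };

#define EX_FLAG_PACKET_CMD      0x01
#define EX_FLAG_XFER_TYPE_DMA   0x02

static int ex_prot_to_flags(enum ex_prot prot, unsigned char *flags)
{
        switch (prot) {
        case EX_NODATA:
        case EX_PIO:
                *flags = 0;
                return 0;
        case EX_DMA:
                *flags = EX_FLAG_XFER_TYPE_DMA;
                return 0;
        case EX_ATAPI:
        case EX_ATAPI_NODATA:
                *flags = EX_FLAG_PACKET_CMD;
                return 0;
        case EX_ATAPI_DMA:
                *flags = EX_FLAG_PACKET_CMD | EX_FLAG_XFER_TYPE_DMA;
                return 0;
        default:
                return -1;      /* unknown protocol, reject the command */
        }
}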
5288
5289/**
5290 * ipr_ata_check_status - Return last ATA status
5291 * @ap: ATA port
5292 *
5293 * Return value:
5294 * ATA status
5295 **/
5296static u8 ipr_ata_check_status(struct ata_port *ap)
5297{
5298 struct ipr_sata_port *sata_port = ap->private_data;
5299 return sata_port->ioasa.status;
5300}
5301
5302/**
5303 * ipr_ata_check_altstatus - Return last ATA altstatus
5304 * @ap: ATA port
5305 *
5306 * Return value:
5307 * Alt ATA status
5308 **/
5309static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5310{
5311 struct ipr_sata_port *sata_port = ap->private_data;
5312 return sata_port->ioasa.alt_status;
5313}
5314
5315static struct ata_port_operations ipr_sata_ops = {
5316 .port_disable = ata_port_disable,
5317 .check_status = ipr_ata_check_status,
5318 .check_altstatus = ipr_ata_check_altstatus,
5319 .dev_select = ata_noop_dev_select,
5320 .phy_reset = ipr_ata_phy_reset,
5321 .post_internal_cmd = ipr_ata_post_internal,
5322 .tf_read = ipr_tf_read,
5323 .qc_prep = ata_noop_qc_prep,
5324 .qc_issue = ipr_qc_issue,
5325 .port_start = ata_sas_port_start,
5326 .port_stop = ata_sas_port_stop
5327};
5328
5329static struct ata_port_info sata_port_info = {
5330 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5331 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5332 .pio_mask = 0x10, /* pio4 */
5333 .mwdma_mask = 0x07,
5334 .udma_mask = 0x7f, /* udma0-6 */
5335 .port_ops = &ipr_sata_ops
5336};
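
/*
 * Illustrative note, not from the driver: the .pio_mask, .mwdma_mask
 * and .udma_mask fields are libata bitmaps with one bit per mode, so
 * 0x10 is bit 4 (PIO4 only), 0x07 is bits 0-2 (MWDMA0-2) and 0x7f is
 * bits 0-6 (UDMA0-6). A tiny sketch decoding the highest mode in a mask:
 */
static int ex_highest_mode(unsigned int mask)
{
        int mode = -1;                  /* -1 means no mode supported */

        while (mask) {
                mode++;
                mask >>= 1;
        }
        return mode;
}
/* ex_highest_mode(0x10) == 4 (PIO4); ex_highest_mode(0x7f) == 6 (UDMA6) */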
5337
5338#ifdef CONFIG_PPC_PSERIES
5339static const u16 ipr_blocked_processors[] = {
5340 PV_NORTHSTAR,
5341 PV_PULSAR,
5342 PV_POWER4,
5343 PV_ICESTAR,
5344 PV_SSTAR,
5345 PV_POWER4p,
5346 PV_630,
5347 PV_630p
5348};
5349
5350/**
5351 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5352 * @ioa_cfg: ioa cfg struct
5353 *
5354 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5355 * certain pSeries hardware. This function determines if the given
5356 * adapter is in one of these configurations or not.
5357 *
5358 * Return value:
5359 * 1 if adapter is not supported / 0 if adapter is supported
5360 **/
5361static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5362{
5363 u8 rev_id;
5364 int i;
5365
5366 if (ioa_cfg->type == 0x5702) {
5367 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5368 &rev_id) == PCIBIOS_SUCCESSFUL) {
5369 if (rev_id < 4) {
5370 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5371 if (__is_processor(ipr_blocked_processors[i]))
5372 return 1;
5373 }
5374 }
5375 }
5376 }
5377 return 0;
5378}
5379#else
5380#define ipr_invalid_adapter(ioa_cfg) 0
5381#endif
5382
5383/**
5384 * ipr_ioa_bringdown_done - IOA bring down completion.
5385 * @ipr_cmd: ipr command struct
5386 *
5387 * This function processes the completion of an adapter bring down.
5388 * It wakes any reset sleepers.
5389 *
5390 * Return value:
5391 * IPR_RC_JOB_RETURN
5392 **/
5393static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5394{
5395 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5396
5397 ENTER;
5398 ioa_cfg->in_reset_reload = 0;
5399 ioa_cfg->reset_retries = 0;
5400 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5401 wake_up_all(&ioa_cfg->reset_wait_q);
5402
5403 spin_unlock_irq(ioa_cfg->host->host_lock);
5404 scsi_unblock_requests(ioa_cfg->host);
5405 spin_lock_irq(ioa_cfg->host->host_lock);
5406 LEAVE;
5407
5408 return IPR_RC_JOB_RETURN;
5409}
5410
5411/**
5412 * ipr_ioa_reset_done - IOA reset completion.
5413 * @ipr_cmd: ipr command struct
5414 *
5415 * This function processes the completion of an adapter reset.
5416 * It schedules any necessary mid-layer add/removes and
5417 * wakes any reset sleepers.
5418 *
5419 * Return value:
5420 * IPR_RC_JOB_RETURN
5421 **/
5422static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5423{
5424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5425 struct ipr_resource_entry *res;
5426 struct ipr_hostrcb *hostrcb, *temp;
5427 int i = 0;
5428
5429 ENTER;
5430 ioa_cfg->in_reset_reload = 0;
5431 ioa_cfg->allow_cmds = 1;
5432 ioa_cfg->reset_cmd = NULL;
5433 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5434
5435 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5436 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5437 ipr_trace;
5438 break;
5439 }
5440 }
5441 schedule_work(&ioa_cfg->work_q);
5442
5443 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5444 list_del(&hostrcb->queue);
5445 if (i++ < IPR_NUM_LOG_HCAMS)
5446 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5447 else
5448 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5449 }
5450
5451 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5452 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5453
5454 ioa_cfg->reset_retries = 0;
5455 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5456 wake_up_all(&ioa_cfg->reset_wait_q);
5457
5458 spin_unlock_irq(ioa_cfg->host->host_lock);
5459 scsi_unblock_requests(ioa_cfg->host);
5460 spin_lock_irq(ioa_cfg->host->host_lock);
5461
5462 if (!ioa_cfg->allow_cmds)
5463 scsi_block_requests(ioa_cfg->host);
5464
5465 LEAVE;
5466 return IPR_RC_JOB_RETURN;
5467}
5468
5469/**
5470 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5471 * @supported_dev: supported device struct
5472 * @vpids: vendor product id struct
5473 *
5474 * Return value:
5475 * none
5476 **/
5477static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5478 struct ipr_std_inq_vpids *vpids)
5479{
5480 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5481 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5482 supported_dev->num_records = 1;
5483 supported_dev->data_length =
5484 cpu_to_be16(sizeof(struct ipr_supported_device));
5485 supported_dev->reserved = 0;
5486}
5487
5488/**
5489 * ipr_set_supported_devs - Send Set Supported Devices for a device
5490 * @ipr_cmd: ipr command struct
5491 *
5492 * This function sends a Set Supported Devices command to the adapter.
5493 *
5494 * Return value:
5495 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5496 **/
5497static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5498{
5499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5500 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5501 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5502 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5503 struct ipr_resource_entry *res = ipr_cmd->u.res;
5504
5505 ipr_cmd->job_step = ipr_ioa_reset_done;
5506
5507 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5508 if (!ipr_is_scsi_disk(res))
5509 continue;
5510
5511 ipr_cmd->u.res = res;
5512 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5513
5514 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5515 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5516 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5517
5518 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5519 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5520 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5521
5522 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5523 sizeof(struct ipr_supported_device));
5524 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5525 offsetof(struct ipr_misc_cbs, supp_dev));
5526 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5527 ioarcb->write_data_transfer_length =
5528 cpu_to_be32(sizeof(struct ipr_supported_device));
5529
5530 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5531 IPR_SET_SUP_DEVICE_TIMEOUT);
5532
5533 ipr_cmd->job_step = ipr_set_supported_devs;
5534 return IPR_RC_JOB_RETURN;
5535 }
5536
5537 return IPR_RC_JOB_CONTINUE;
5538}
5539
5540/**
5541 * ipr_setup_write_cache - Disable write cache if needed
5542 * @ipr_cmd: ipr command struct
5543 *
5544 * This function sets up the adapter's write cache to the desired setting.
5545 *
5546 * Return value:
5547 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5548 **/
5549static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5550{
5551 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5552
5553 ipr_cmd->job_step = ipr_set_supported_devs;
5554 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5555 struct ipr_resource_entry, queue);
5556
5557 if (ioa_cfg->cache_state != CACHE_DISABLED)
5558 return IPR_RC_JOB_CONTINUE;
5559
5560 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5561 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5562 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5563 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5564
5565 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5566
5567 return IPR_RC_JOB_RETURN;
5568}
5569
5570/**
5571 * ipr_get_mode_page - Locate specified mode page
5572 * @mode_pages: mode page buffer
5573 * @page_code: page code to find
5574 * @len: minimum required length for mode page
5575 *
5576 * Return value:
5577 * pointer to mode page / NULL on failure
5578 **/
5579static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5580 u32 page_code, u32 len)
5581{
5582 struct ipr_mode_page_hdr *mode_hdr;
5583 u32 page_length;
5584 u32 length;
5585
5586 if (!mode_pages || (mode_pages->hdr.length == 0))
5587 return NULL;
5588
5589 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5590 mode_hdr = (struct ipr_mode_page_hdr *)
5591 (mode_pages->data + mode_pages->hdr.block_desc_len);
5592
5593 while (length) {
5594 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5595 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5596 return mode_hdr;
5597 break;
5598 } else {
5599 page_length = (sizeof(struct ipr_mode_page_hdr) +
5600 mode_hdr->page_length);
5601 length -= page_length;
5602 mode_hdr = (struct ipr_mode_page_hdr *)
5603 ((unsigned long)mode_hdr + page_length);
5604 }
5605 }
5606 return NULL;
5607}
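
/*
 * Illustrative aside, not part of the driver: mode sense data is a
 * header, an optional block descriptor, then a sequence of variable-
 * length pages, each beginning with {page_code, page_length}. The walk
 * below mirrors ipr_get_mode_page() over a hypothetical flat buffer:
 */
#include <stddef.h>
#include <stdint.h>

struct ex_page_hdr {
        uint8_t page_code;      /* low 6 bits are the page code */
        uint8_t page_length;    /* bytes that follow this header */
};

static struct ex_page_hdr *ex_find_page(uint8_t *data, size_t len,
                                        uint8_t code)
{
        size_t off = 0;

        while (off + sizeof(struct ex_page_hdr) <= len) {
                struct ex_page_hdr *hdr = (struct ex_page_hdr *)(data + off);

                if ((hdr->page_code & 0x3f) == code)
                        return hdr;
                off += sizeof(struct ex_page_hdr) + hdr->page_length;
        }
        return NULL;            /* page not present in the buffer */
}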
5608
5609/**
5610 * ipr_check_term_power - Check for term power errors
5611 * @ioa_cfg: ioa config struct
5612 * @mode_pages: IOAFP mode pages buffer
5613 *
5614 * Check the IOAFP's mode page 28 for term power errors
5615 *
5616 * Return value:
5617 * nothing
5618 **/
5619static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5620 struct ipr_mode_pages *mode_pages)
5621{
5622 int i;
5623 int entry_length;
5624 struct ipr_dev_bus_entry *bus;
5625 struct ipr_mode_page28 *mode_page;
5626
5627 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5628 sizeof(struct ipr_mode_page28));
5629
5630 entry_length = mode_page->entry_length;
5631
5632 bus = mode_page->bus;
5633
5634 for (i = 0; i < mode_page->num_entries; i++) {
5635 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5636 dev_err(&ioa_cfg->pdev->dev,
5637 "Term power is absent on scsi bus %d\n",
5638 bus->res_addr.bus);
5639 }
5640
5641 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5642 }
5643}
5644
5645/**
5646 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5647 * @ioa_cfg: ioa config struct
5648 *
5649 * Looks through the config table checking for SES devices. If
5650 * the SES device is in the SES table indicating a maximum SCSI
5651 * bus speed, the speed is limited for the bus.
5652 *
5653 * Return value:
5654 * none
5655 **/
5656static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5657{
5658 u32 max_xfer_rate;
5659 int i;
5660
5661 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5662 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5663 ioa_cfg->bus_attr[i].bus_width);
5664
5665 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5666 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5667 }
5668}
5669
5670/**
5671 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5672 * @ioa_cfg: ioa config struct
5673 * @mode_pages: mode page 28 buffer
5674 *
5675 * Updates mode page 28 based on driver configuration
5676 *
5677 * Return value:
5678 * none
5679 **/
5680static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5681 struct ipr_mode_pages *mode_pages)
5682{
5683 int i, entry_length;
5684 struct ipr_dev_bus_entry *bus;
5685 struct ipr_bus_attributes *bus_attr;
5686 struct ipr_mode_page28 *mode_page;
5687
5688 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5689 sizeof(struct ipr_mode_page28));
5690
5691 entry_length = mode_page->entry_length;
5692
5693 /* Loop for each device bus entry */
5694 for (i = 0, bus = mode_page->bus;
5695 i < mode_page->num_entries;
5696 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5697 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5698 dev_err(&ioa_cfg->pdev->dev,
5699 "Invalid resource address reported: 0x%08X\n",
5700 IPR_GET_PHYS_LOC(bus->res_addr));
5701 continue;
5702 }
5703
5704 bus_attr = &ioa_cfg->bus_attr[i];
5705 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5706 bus->bus_width = bus_attr->bus_width;
5707 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5708 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5709 if (bus_attr->qas_enabled)
5710 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5711 else
5712 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5713 }
5714}
5715
5716/**
5717 * ipr_build_mode_select - Build a mode select command
5718 * @ipr_cmd: ipr command struct
5719 * @res_handle: resource handle to send command to
5720 * @parm: Byte 1 of the Mode Select CDB (PF/SP flags)
5721 * @dma_addr: DMA buffer address
5722 * @xfer_len: data transfer length
5723 *
5724 * Return value:
5725 * none
5726 **/
5727static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5728 __be32 res_handle, u8 parm, u32 dma_addr,
5729 u8 xfer_len)
5730{
5731 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5732 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5733
5734 ioarcb->res_handle = res_handle;
5735 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5736 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5737 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5738 ioarcb->cmd_pkt.cdb[1] = parm;
5739 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5740
5741 ioadl->flags_and_data_len =
5742 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5743 ioadl->address = cpu_to_be32(dma_addr);
5744 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5745 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5746}
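
/*
 * Illustrative aside, not part of the driver: a 6-byte MODE SELECT CDB
 * carries the opcode in byte 0, the PF/SP bits in byte 1 and the
 * parameter list length in byte 4, which is exactly what
 * ipr_build_mode_select() fills in above. A hypothetical helper:
 */
#include <string.h>
#include <stdint.h>

#define EX_MODE_SELECT  0x15    /* SCSI MODE SELECT(6) opcode */

static void ex_build_mode_select_cdb(uint8_t cdb[6], uint8_t parm,
                                     uint8_t len)
{
        memset(cdb, 0, 6);
        cdb[0] = EX_MODE_SELECT;
        cdb[1] = parm;          /* e.g. 0x11 = PF | SP */
        cdb[4] = len;           /* parameter list length */
}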
5747
5748/**
5749 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5750 * @ipr_cmd: ipr command struct
5751 *
5752 * This function sets up the SCSI bus attributes and sends
5753 * a Mode Select for Page 28 to activate them.
5754 *
5755 * Return value:
5756 * IPR_RC_JOB_RETURN
5757 **/
5758static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5759{
5760 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5761 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5762 int length;
5763
5764 ENTER;
5765 ipr_scsi_bus_speed_limit(ioa_cfg);
5766 ipr_check_term_power(ioa_cfg, mode_pages);
5767 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5768 length = mode_pages->hdr.length + 1;
5769 mode_pages->hdr.length = 0;
5770
5771 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5772 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5773 length);
5774
5775 ipr_cmd->job_step = ipr_setup_write_cache;
5776 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5777
5778 LEAVE;
5779 return IPR_RC_JOB_RETURN;
5780}
5781
5782/**
5783 * ipr_build_mode_sense - Builds a mode sense command
5784 * @ipr_cmd: ipr command struct
5785 * @res_handle: resource handle to send command to
5786 * @parm: Byte 2 of mode sense command
5787 * @dma_addr: DMA address of mode sense buffer
5788 * @xfer_len: Size of DMA buffer
5789 *
5790 * Return value:
5791 * none
5792 **/
5793static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5794 __be32 res_handle,
5795 u8 parm, u32 dma_addr, u8 xfer_len)
5796{
5797 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5798 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5799
5800 ioarcb->res_handle = res_handle;
5801 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5802 ioarcb->cmd_pkt.cdb[2] = parm;
5803 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5804 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5805
5806 ioadl->flags_and_data_len =
5807 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5808 ioadl->address = cpu_to_be32(dma_addr);
5809 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5810 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5811}
5812
5813/**
5814 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5815 * @ipr_cmd: ipr command struct
5816 *
5817 * This function handles the failure of an IOA bringup command.
5818 *
5819 * Return value:
5820 * IPR_RC_JOB_RETURN
5821 **/
5822static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5823{
5824 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5825 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5826
5827 dev_err(&ioa_cfg->pdev->dev,
5828 "0x%02X failed with IOASC: 0x%08X\n",
5829 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5830
5831 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5832 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5833 return IPR_RC_JOB_RETURN;
5834}
5835
5836/**
5837 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5838 * @ipr_cmd: ipr command struct
5839 *
5840 * This function handles the failure of a Mode Sense to the IOAFP.
5841 * Some adapters do not handle all mode pages.
5842 *
5843 * Return value:
5844 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5845 **/
5846static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5847{
5848 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5849
5850 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5851 ipr_cmd->job_step = ipr_setup_write_cache;
5852 return IPR_RC_JOB_CONTINUE;
5853 }
5854
5855 return ipr_reset_cmd_failed(ipr_cmd);
5856}
5857
5858/**
5859 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5860 * @ipr_cmd: ipr command struct
5861 *
5862 * This function sends a Page 28 mode sense to the IOA to
5863 * retrieve SCSI bus attributes.
5864 *
5865 * Return value:
5866 * IPR_RC_JOB_RETURN
5867 **/
5868static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5869{
5870 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5871
5872 ENTER;
5873 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5874 0x28, ioa_cfg->vpd_cbs_dma +
5875 offsetof(struct ipr_misc_cbs, mode_pages),
5876 sizeof(struct ipr_mode_pages));
5877
5878 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5879 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5880
5881 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5882
5883 LEAVE;
5884 return IPR_RC_JOB_RETURN;
5885}
5886
5887/**
5888 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
5889 * @ipr_cmd: ipr command struct
5890 *
5891 * This function enables dual IOA RAID support if possible.
5892 *
5893 * Return value:
5894 * IPR_RC_JOB_RETURN
5895 **/
5896static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
5897{
5898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5899 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5900 struct ipr_mode_page24 *mode_page;
5901 int length;
5902
5903 ENTER;
5904 mode_page = ipr_get_mode_page(mode_pages, 0x24,
5905 sizeof(struct ipr_mode_page24));
5906
5907 if (mode_page)
5908 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
5909
5910 length = mode_pages->hdr.length + 1;
5911 mode_pages->hdr.length = 0;
5912
5913 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5914 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5915 length);
5916
5917 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5918 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5919
5920 LEAVE;
5921 return IPR_RC_JOB_RETURN;
5922}
5923
5924/**
5925 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
5926 * @ipr_cmd: ipr command struct
5927 *
5928 * This function handles the failure of a Mode Sense to the IOAFP.
5929 * Some adapters do not handle all mode pages.
5930 *
5931 * Return value:
5932 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5933 **/
5934static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
5935{
5936 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5937
5938 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5939 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5940 return IPR_RC_JOB_CONTINUE;
5941 }
5942
5943 return ipr_reset_cmd_failed(ipr_cmd);
5944}
5945
5946/**
5947 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
5948 * @ipr_cmd: ipr command struct
5949 *
5950 * This function sends a mode sense to the IOA to retrieve
5951 * the IOA Advanced Function Control mode page.
5952 *
5953 * Return value:
5954 * IPR_RC_JOB_RETURN
5955 **/
5956static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
5957{
5958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5959
5960 ENTER;
5961 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5962 0x24, ioa_cfg->vpd_cbs_dma +
5963 offsetof(struct ipr_misc_cbs, mode_pages),
5964 sizeof(struct ipr_mode_pages));
5965
5966 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
5967 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
5968
5969 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5970
5971 LEAVE;
5972 return IPR_RC_JOB_RETURN;
5973}
5974
5975/**
5976 * ipr_init_res_table - Initialize the resource table
5977 * @ipr_cmd: ipr command struct
5978 *
5979 * This function looks through the existing resource table, comparing
5980 * it with the config table. This function will take care of old/new
5981 * devices and schedule adding/removing them from the mid-layer
5982 * as appropriate.
5983 *
5984 * Return value:
5985 * IPR_RC_JOB_CONTINUE
5986 **/
5987static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5988{
5989 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5990 struct ipr_resource_entry *res, *temp;
5991 struct ipr_config_table_entry *cfgte;
5992 int found, i;
5993 LIST_HEAD(old_res);
5994
5995 ENTER;
5996 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5997 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5998
5999 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6000 list_move_tail(&res->queue, &old_res);
6001
6002 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
6003 cfgte = &ioa_cfg->cfg_table->dev[i];
6004 found = 0;
6005
6006 list_for_each_entry_safe(res, temp, &old_res, queue) {
6007 if (!memcmp(&res->cfgte.res_addr,
6008 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
6009 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6010 found = 1;
6011 break;
6012 }
6013 }
6014
6015 if (!found) {
6016 if (list_empty(&ioa_cfg->free_res_q)) {
6017 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6018 break;
6019 }
6020
6021 found = 1;
6022 res = list_entry(ioa_cfg->free_res_q.next,
6023 struct ipr_resource_entry, queue);
6024 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6025 ipr_init_res_entry(res);
6026 res->add_to_ml = 1;
6027 }
6028
6029 if (found)
6030 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
6031 }
6032
6033 list_for_each_entry_safe(res, temp, &old_res, queue) {
6034 if (res->sdev) {
6035 res->del_from_ml = 1;
6036 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
6037 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6038 } else {
6039 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6040 }
6041 }
6042
6043 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6044 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6045 else
6046 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6047
6048 LEAVE;
6049 return IPR_RC_JOB_CONTINUE;
6050}
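
/*
 * Illustrative aside, not part of the driver: ipr_init_res_table() is a
 * set reconciliation keyed on resource address. Entries found in both
 * the old list and the new config table are kept and refreshed,
 * unmatched config entries become new devices flagged for a mid-layer
 * add, and old leftovers are flagged for removal. A minimal userspace
 * analogue over integer keys, hypothetical names throughout:
 */
#include <stdio.h>

struct ex_res { int key; int in_use; };         /* 1 = live, 2 = matched */

static void ex_reconcile(struct ex_res old[], int n_old,
                         const int new_keys[], int n_new)
{
        int i, j, found;

        for (j = 0; j < n_new; j++) {
                found = 0;
                for (i = 0; i < n_old; i++) {
                        if (old[i].in_use == 1 && old[i].key == new_keys[j]) {
                                old[i].in_use = 2;  /* kept: refresh config here */
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        printf("add device %d to mid-layer\n", new_keys[j]);
        }
        for (i = 0; i < n_old; i++)
                if (old[i].in_use == 1)         /* never matched: gone */
                        printf("remove device %d from mid-layer\n", old[i].key);
}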
6051
6052/**
6053 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6054 * @ipr_cmd: ipr command struct
6055 *
6056 * This function sends a Query IOA Configuration command
6057 * to the adapter to retrieve the IOA configuration table.
6058 *
6059 * Return value:
6060 * IPR_RC_JOB_RETURN
6061 **/
6062static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6063{
6064 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6065 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6066 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6067 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6068 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6069
6070 ENTER;
6071 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6072 ioa_cfg->dual_raid = 1;
6073 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6074 ucode_vpd->major_release, ucode_vpd->card_type,
6075 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6076 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6077 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6078
6079 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6080 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
6081 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
6082
6083 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6084 ioarcb->read_data_transfer_length =
6085 cpu_to_be32(sizeof(struct ipr_config_table));
6086
6087 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6088 ioadl->flags_and_data_len =
6089 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6090
6091 ipr_cmd->job_step = ipr_init_res_table;
6092
6093 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6094
6095 LEAVE;
6096 return IPR_RC_JOB_RETURN;
6097}
6098
6099/**
6100 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6101 * @ipr_cmd: ipr command struct
6102 *
6103 * This utility function sends an inquiry to the adapter.
6104 *
6105 * Return value:
6106 * none
6107 **/
6108static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6109 u32 dma_addr, u8 xfer_len)
6110{
6111 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6112 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6113
6114 ENTER;
6115 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6116 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6117
6118 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6119 ioarcb->cmd_pkt.cdb[1] = flags;
6120 ioarcb->cmd_pkt.cdb[2] = page;
6121 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6122
6123 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6124 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6125
6126 ioadl->address = cpu_to_be32(dma_addr);
6127 ioadl->flags_and_data_len =
6128 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6129
6130 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6131 LEAVE;
6132}
6133
6134/**
6135 * ipr_inquiry_page_supported - Is the given inquiry page supported
6136 * @page0: inquiry page 0 buffer
6137 * @page: page code.
6138 *
6139 * This function determines if the specified inquiry page is supported.
6140 *
6141 * Return value:
6142 * 1 if page is supported / 0 if not
6143 **/
6144static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6145{
6146 int i;
6147
6148 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6149 if (page0->page[i] == page)
6150 return 1;
6151
6152 return 0;
6153}
6154
6155/**
6156 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6157 * @ipr_cmd: ipr command struct
6158 *
6159 * This function sends a Page 0xD0 inquiry to the adapter
6160 * to retrieve adapter capabilities.
6161 *
6162 * Return value:
6163 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6164 **/
6165static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6166{
6167 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6168 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6169 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6170
6171 ENTER;
6172 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6173 memset(cap, 0, sizeof(*cap));
6174
6175 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6176 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6177 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6178 sizeof(struct ipr_inquiry_cap));
6179 return IPR_RC_JOB_RETURN;
6180 }
6181
6182 LEAVE;
6183 return IPR_RC_JOB_CONTINUE;
6184}
6185
6186/**
6187 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6188 * @ipr_cmd: ipr command struct
6189 *
6190 * This function sends a Page 3 inquiry to the adapter
6191 * to retrieve software VPD information.
6192 *
6193 * Return value:
6194 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6195 **/
6196static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6197{
6198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6199 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6200
6201 ENTER;
6202
6203 if (!ipr_inquiry_page_supported(page0, 1))
6204 ioa_cfg->cache_state = CACHE_NONE;
6205
6206 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6207
6208 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6209 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6210 sizeof(struct ipr_inquiry_page3));
6211
6212 LEAVE;
6213 return IPR_RC_JOB_RETURN;
6214}
6215
6216/**
6217 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6218 * @ipr_cmd: ipr command struct
6219 *
6220 * This function sends a Page 0 inquiry to the adapter
6221 * to retrieve supported inquiry pages.
6222 *
6223 * Return value:
6224 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6225 **/
6226static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6227{
6228 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6229 char type[5];
6230
6231 ENTER;
6232
6233 /* Grab the type out of the VPD and store it away */
6234 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6235 type[4] = '\0';
6236 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6237
6238 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6239
6240 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6241 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6242 sizeof(struct ipr_inquiry_page0));
6243
6244 LEAVE;
6245 return IPR_RC_JOB_RETURN;
6246}
6247
6248/**
6249 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6250 * @ipr_cmd: ipr command struct
6251 *
6252 * This function sends a standard inquiry to the adapter.
6253 *
6254 * Return value:
6255 * IPR_RC_JOB_RETURN
6256 **/
6257static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6258{
6259 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6260
6261 ENTER;
6262 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6263
6264 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6265 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6266 sizeof(struct ipr_ioa_vpd));
6267
6268 LEAVE;
6269 return IPR_RC_JOB_RETURN;
6270}
6271
6272/**
6273 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6274 * @ipr_cmd: ipr command struct
6275 *
6276 * This function sends an Identify Host Request Response Queue
6277 * command to establish the HRRQ with the adapter.
6278 *
6279 * Return value:
6280 * IPR_RC_JOB_RETURN
6281 **/
6282static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6283{
6284 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6285 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6286
6287 ENTER;
6288 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6289
6290 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6291 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6292
6293 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6294 ioarcb->cmd_pkt.cdb[2] =
6295 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6296 ioarcb->cmd_pkt.cdb[3] =
6297 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6298 ioarcb->cmd_pkt.cdb[4] =
6299 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6300 ioarcb->cmd_pkt.cdb[5] =
6301 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
6302 ioarcb->cmd_pkt.cdb[7] =
6303 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6304 ioarcb->cmd_pkt.cdb[8] =
6305 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6306
6307 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6308
6309 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6310
6311 LEAVE;
6312 return IPR_RC_JOB_RETURN;
6313}
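
/*
 * Illustrative aside, not part of the driver: the Identify Host RRQ CDB
 * carries the 32-bit DMA address of the queue in bytes 2-5, most
 * significant byte first, and the queue size in bytes 7-8. The same
 * big-endian byte packing in plain C (hypothetical helper name):
 */
#include <stdint.h>

static void ex_pack_addr_be32(uint8_t *cdb, uint32_t addr)
{
        cdb[2] = (addr >> 24) & 0xff;   /* MSB first */
        cdb[3] = (addr >> 16) & 0xff;
        cdb[4] = (addr >> 8) & 0xff;
        cdb[5] = addr & 0xff;
}
/* e.g. addr 0x12345678 yields cdb[2..5] = 12 34 56 78 */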
6314
6315/**
6316 * ipr_reset_timer_done - Adapter reset timer function
6317 * @ipr_cmd: ipr command struct
6318 *
6319 * Description: This function is used in adapter reset processing
6320 * for timing events. If the reset_cmd pointer in the IOA
6321 * config struct is not this adapter's, we are doing nested
6322 * resets and fail_all_ops will take care of freeing the
6323 * command block.
6324 *
6325 * Return value:
6326 * none
6327 **/
6328static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6329{
6330 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6331 unsigned long lock_flags = 0;
6332
6333 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6334
6335 if (ioa_cfg->reset_cmd == ipr_cmd) {
6336 list_del(&ipr_cmd->queue);
6337 ipr_cmd->done(ipr_cmd);
6338 }
6339
6340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6341}
6342
6343/**
6344 * ipr_reset_start_timer - Start a timer for adapter reset job
6345 * @ipr_cmd: ipr command struct
6346 * @timeout: timeout value
6347 *
6348 * Description: This function is used in adapter reset processing
6349 * for timing events. If the reset_cmd pointer in the IOA
6350 * config struct is not this adapter's, we are doing nested
6351 * resets and fail_all_ops will take care of freeing the
6352 * command block.
6353 *
6354 * Return value:
6355 * none
6356 **/
6357static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6358 unsigned long timeout)
6359{
6360 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6361 ipr_cmd->done = ipr_reset_ioa_job;
6362
6363 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6364 ipr_cmd->timer.expires = jiffies + timeout;
6365 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6366 add_timer(&ipr_cmd->timer);
6367}
6368
6369/**
6370 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6371 * @ioa_cfg: ioa cfg struct
6372 *
6373 * Return value:
6374 * nothing
6375 **/
6376static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6377{
6378 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6379
6380 /* Initialize Host RRQ pointers */
6381 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6382 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6383 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6384 ioa_cfg->toggle_bit = 1;
6385
6386 /* Zero out config table */
6387 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6388}
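
/*
 * Illustrative aside, not part of the driver: the host RRQ is a ring of
 * 32-bit response words. The producer flips a toggle bit each time it
 * wraps, so the consumer can tell fresh entries from stale ones without
 * a separate producer index. A hypothetical consumer loop under those
 * assumptions:
 */
#include <stdint.h>

#define EX_TOGGLE_BIT 0x1u

struct ex_rrq {
        volatile uint32_t *start, *end, *curr;  /* ring bounds and cursor */
        uint32_t toggle;                        /* expected toggle value */
};

static int ex_rrq_pop(struct ex_rrq *q, uint32_t *out)
{
        uint32_t word = *q->curr;

        if ((word & EX_TOGGLE_BIT) != q->toggle)
                return 0;                       /* nothing new yet */

        *out = word;
        if (++q->curr > q->end) {               /* wrap and flip expectation */
                q->curr = q->start;
                q->toggle ^= EX_TOGGLE_BIT;
        }
        return 1;
}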
6389
6390/**
6391 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6392 * @ipr_cmd: ipr command struct
6393 *
6394 * This function reinitializes some control blocks and
6395 * enables destructive diagnostics on the adapter.
6396 *
6397 * Return value:
6398 * IPR_RC_JOB_RETURN
6399 **/
6400static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6401{
6402 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6403 volatile u32 int_reg;
6404
6405 ENTER;
6406 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6407 ipr_init_ioa_mem(ioa_cfg);
6408
6409 ioa_cfg->allow_interrupts = 1;
6410 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6411
6412 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6413 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6414 ioa_cfg->regs.clr_interrupt_mask_reg);
6415 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6416 return IPR_RC_JOB_CONTINUE;
6417 }
6418
6419 /* Enable destructive diagnostics on IOA */
6420 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6421
6422 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6423 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6424
6425 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6426
6427 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6428 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6429 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6430 ipr_cmd->done = ipr_reset_ioa_job;
6431 add_timer(&ipr_cmd->timer);
6432 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6433
6434 LEAVE;
6435 return IPR_RC_JOB_RETURN;
6436}
6437
6438/**
6439 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6440 * @ipr_cmd: ipr command struct
6441 *
6442 * This function is invoked when an adapter dump has run out
6443 * of processing time.
6444 *
6445 * Return value:
6446 * IPR_RC_JOB_CONTINUE
6447 **/
6448static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6449{
6450 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6451
6452 if (ioa_cfg->sdt_state == GET_DUMP)
6453 ioa_cfg->sdt_state = ABORT_DUMP;
6454
6455 ipr_cmd->job_step = ipr_reset_alert;
6456
6457 return IPR_RC_JOB_CONTINUE;
6458}
6459
6460/**
6461 * ipr_unit_check_no_data - Log a unit check/no data error log
6462 * @ioa_cfg: ioa config struct
6463 *
6464 * Logs an error indicating the adapter unit checked, but for some
6465 * reason, we were unable to fetch the unit check buffer.
6466 *
6467 * Return value:
6468 * nothing
6469 **/
6470static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6471{
6472 ioa_cfg->errors_logged++;
6473 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6474}
6475
6476/**
6477 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6478 * @ioa_cfg: ioa config struct
6479 *
6480 * Fetches the unit check buffer from the adapter by clocking the data
6481 * through the mailbox register.
6482 *
6483 * Return value:
6484 * nothing
6485 **/
6486static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6487{
6488 unsigned long mailbox;
6489 struct ipr_hostrcb *hostrcb;
6490 struct ipr_uc_sdt sdt;
6491 int rc, length;
6492 u32 ioasc;
6493
6494 mailbox = readl(ioa_cfg->ioa_mailbox);
6495
6496 if (!ipr_sdt_is_fmt2(mailbox)) {
6497 ipr_unit_check_no_data(ioa_cfg);
6498 return;
6499 }
6500
6501 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6502 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6503 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6504
6505 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6506 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6507 ipr_unit_check_no_data(ioa_cfg);
6508 return;
6509 }
6510
6511 /* Find length of the first sdt entry (UC buffer) */
6512 length = (be32_to_cpu(sdt.entry[0].end_offset) -
6513 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6514
6515 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6516 struct ipr_hostrcb, queue);
6517 list_del(&hostrcb->queue);
6518 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6519
6520 rc = ipr_get_ldump_data_section(ioa_cfg,
6521 be32_to_cpu(sdt.entry[0].bar_str_offset),
6522 (__be32 *)&hostrcb->hcam,
6523 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6524
6525 if (!rc) {
6526 ipr_handle_log_data(ioa_cfg, hostrcb);
6527 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6528 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6529 ioa_cfg->sdt_state == GET_DUMP)
6530 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6531 } else
6532 ipr_unit_check_no_data(ioa_cfg);
6533
6534 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6535}
6536
6537/**
6538 * ipr_reset_restore_cfg_space - Restore PCI config space.
6539 * @ipr_cmd: ipr command struct
6540 *
6541 * Description: This function restores the saved PCI config space of
6542 * the adapter, fails all outstanding ops back to the callers, and
6543 * fetches the dump/unit check if applicable to this reset.
6544 *
6545 * Return value:
6546 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6547 **/
6548static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6549{
6550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6551 int rc;
6552
6553 ENTER;
6554 rc = pci_restore_state(ioa_cfg->pdev);
6555
6556 if (rc != PCIBIOS_SUCCESSFUL) {
6557 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6558 return IPR_RC_JOB_CONTINUE;
6559 }
6560
6561 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6562 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6563 return IPR_RC_JOB_CONTINUE;
6564 }
6565
6566 ipr_fail_all_ops(ioa_cfg);
6567
6568 if (ioa_cfg->ioa_unit_checked) {
6569 ioa_cfg->ioa_unit_checked = 0;
6570 ipr_get_unit_check_buffer(ioa_cfg);
6571 ipr_cmd->job_step = ipr_reset_alert;
6572 ipr_reset_start_timer(ipr_cmd, 0);
6573 return IPR_RC_JOB_RETURN;
6574 }
6575
6576 if (ioa_cfg->in_ioa_bringdown) {
6577 ipr_cmd->job_step = ipr_ioa_bringdown_done;
6578 } else {
6579 ipr_cmd->job_step = ipr_reset_enable_ioa;
6580
6581 if (GET_DUMP == ioa_cfg->sdt_state) {
6582 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6583 ipr_cmd->job_step = ipr_reset_wait_for_dump;
6584 schedule_work(&ioa_cfg->work_q);
6585 return IPR_RC_JOB_RETURN;
6586 }
6587 }
6588
6589 LEAVE;
6590 return IPR_RC_JOB_CONTINUE;
6591}
6592
6593/**
6594 * ipr_reset_bist_done - BIST has completed on the adapter.
6595 * @ipr_cmd: ipr command struct
6596 *
6597 * Description: Unblock config space and resume the reset process.
6598 *
6599 * Return value:
6600 * IPR_RC_JOB_CONTINUE
6601 **/
6602static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6603{
6604 ENTER;
6605 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6606 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6607 LEAVE;
6608 return IPR_RC_JOB_CONTINUE;
6609}
6610
6611/**
6612 * ipr_reset_start_bist - Run BIST on the adapter.
6613 * @ipr_cmd: ipr command struct
6614 *
6615 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6616 *
6617 * Return value:
6618 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6619 **/
6620static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6621{
6622 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6623 int rc;
6624
6625 ENTER;
6626 pci_block_user_cfg_access(ioa_cfg->pdev);
6627 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6628
6629 if (rc != PCIBIOS_SUCCESSFUL) {
6630 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6631 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6632 rc = IPR_RC_JOB_CONTINUE;
6633 } else {
6634 ipr_cmd->job_step = ipr_reset_bist_done;
6635 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6636 rc = IPR_RC_JOB_RETURN;
6637 }
6638
6639 LEAVE;
6640 return rc;
6641}
6642
6643/**
6644 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6645 * @ipr_cmd: ipr command struct
6646 *
6647 * Description: This clears PCI reset to the adapter and delays two seconds.
6648 *
6649 * Return value:
6650 * IPR_RC_JOB_RETURN
6651 **/
6652static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6653{
6654 ENTER;
6655 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6656 ipr_cmd->job_step = ipr_reset_bist_done;
6657 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6658 LEAVE;
6659 return IPR_RC_JOB_RETURN;
6660}
6661
6662/**
6663 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6664 * @ipr_cmd: ipr command struct
6665 *
6666 * Description: This asserts PCI reset to the adapter.
6667 *
6668 * Return value:
6669 * IPR_RC_JOB_RETURN
6670 **/
6671static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6672{
6673 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6674 struct pci_dev *pdev = ioa_cfg->pdev;
6675
6676 ENTER;
6677 pci_block_user_cfg_access(pdev);
6678 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6679 ipr_cmd->job_step = ipr_reset_slot_reset_done;
6680 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6681 LEAVE;
6682 return IPR_RC_JOB_RETURN;
6683}
6684
6685/**
6686 * ipr_reset_allowed - Query whether or not IOA can be reset
6687 * @ioa_cfg: ioa config struct
6688 *
6689 * Return value:
6690 * 0 if reset not allowed / non-zero if reset is allowed
6691 **/
6692static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6693{
6694 volatile u32 temp_reg;
6695
6696 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6697 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6698}
6699
6700/**
6701 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6702 * @ipr_cmd: ipr command struct
6703 *
6704 * Description: This function waits for adapter permission to run BIST,
6705 * then runs BIST. If the adapter does not give permission after a
6706 * reasonable time, we will reset the adapter anyway. The impact of
6707 * resetting the adapter without warning the adapter is the risk of
6708 * losing the persistent error log on the adapter. If the adapter is
6709 * reset while it is writing to the flash on the adapter, the flash
6710 * segment will have bad ECC and be zeroed.
6711 *
6712 * Return value:
6713 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6714 **/
6715static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6716{
6717 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6718 int rc = IPR_RC_JOB_RETURN;
6719
6720 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6721 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6722 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6723 } else {
6724 ipr_cmd->job_step = ioa_cfg->reset;
6725 rc = IPR_RC_JOB_CONTINUE;
6726 }
6727
6728 return rc;
6729}
6730
6731/**
6732 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6733 * @ipr_cmd: ipr command struct
6734 *
6735 * Description: This function alerts the adapter that it will be reset.
6736 * If memory space is not currently enabled, proceed directly
6737 * to running BIST on the adapter. The timer must always be started
6738 * so we guarantee we do not run BIST from ipr_isr.
6739 *
6740 * Return value:
6741 * IPR_RC_JOB_RETURN
6742 **/
6743static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6744{
6745 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6746 u16 cmd_reg;
6747 int rc;
6748
6749 ENTER;
6750 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6751
6752 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6753 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6754 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6755 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6756 } else {
6757 ipr_cmd->job_step = ioa_cfg->reset;
6758 }
6759
6760 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6761 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6762
6763 LEAVE;
6764 return IPR_RC_JOB_RETURN;
6765}
6766
6767/**
6768 * ipr_reset_ucode_download_done - Microcode download completion
6769 * @ipr_cmd: ipr command struct
6770 *
6771 * Description: This function unmaps the microcode download buffer.
6772 *
6773 * Return value:
6774 * IPR_RC_JOB_CONTINUE
6775 **/
6776static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6777{
6778 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6779 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6780
6781 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6782 sglist->num_sg, DMA_TO_DEVICE);
6783
6784 ipr_cmd->job_step = ipr_reset_alert;
6785 return IPR_RC_JOB_CONTINUE;
6786}
6787
6788/**
6789 * ipr_reset_ucode_download - Download microcode to the adapter
6790 * @ipr_cmd: ipr command struct
6791 *
6792 * Description: This function checks to see if there is microcode
6793 * to download to the adapter. If there is, a download is performed.
6794 *
6795 * Return value:
6796 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6797 **/
6798static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6799{
6800 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6801 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6802
6803 ENTER;
6804 ipr_cmd->job_step = ipr_reset_alert;
6805
6806 if (!sglist)
6807 return IPR_RC_JOB_CONTINUE;
6808
6809 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6810 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6811 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6812 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6813 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6814 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6815 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6816
6817 ipr_build_ucode_ioadl(ipr_cmd, sglist);
6818 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6819
6820 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6821 IPR_WRITE_BUFFER_TIMEOUT);
6822
6823 LEAVE;
6824 return IPR_RC_JOB_RETURN;
6825}
6826
6827/**
6828 * ipr_reset_shutdown_ioa - Shutdown the adapter
6829 * @ipr_cmd: ipr command struct
6830 *
6831 * Description: This function issues an adapter shutdown of the
6832 * specified type to the specified adapter as part of the
6833 * adapter reset job.
6834 *
6835 * Return value:
6836 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6837 **/
6838static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6839{
6840 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6841 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6842 unsigned long timeout;
6843 int rc = IPR_RC_JOB_CONTINUE;
6844
6845 ENTER;
6846 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6847 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6848 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6849 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6850 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6851
6852 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
6853 timeout = IPR_SHUTDOWN_TIMEOUT;
6854 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6855 timeout = IPR_INTERNAL_TIMEOUT;
6856 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6857 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
6858 else
6859 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6860
6861 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6862
6863 rc = IPR_RC_JOB_RETURN;
6864 ipr_cmd->job_step = ipr_reset_ucode_download;
6865 } else
6866 ipr_cmd->job_step = ipr_reset_alert;
6867
6868 LEAVE;
6869 return rc;
6870}
6871
6872/**
6873 * ipr_reset_ioa_job - Adapter reset job
6874 * @ipr_cmd: ipr command struct
6875 *
6876 * Description: This function is the job router for the adapter reset job.
6877 *
6878 * Return value:
6879 * none
6880 **/
6881static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6882{
6883 u32 rc, ioasc;
6884 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6885
6886 do {
6887 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6888
6889 if (ioa_cfg->reset_cmd != ipr_cmd) {
6890 /*
6891 * We are doing nested adapter resets and this is
6892 * not the current reset job.
6893 */
6894 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6895 return;
6896 }
6897
6898 if (IPR_IOASC_SENSE_KEY(ioasc)) {
6899 rc = ipr_cmd->job_step_failed(ipr_cmd);
6900 if (rc == IPR_RC_JOB_RETURN)
6901 return;
6902 }
6903
6904 ipr_reinit_ipr_cmnd(ipr_cmd);
6905 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6906 rc = ipr_cmd->job_step(ipr_cmd);
6907 } while(rc == IPR_RC_JOB_CONTINUE);
6908}
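
/*
 * Illustrative aside, not part of the driver: the reset job is a
 * coroutine-style state machine. Each step either finishes its work
 * synchronously and returns CONTINUE, so the router immediately runs
 * the next job_step, or starts an async operation (an IOA command or a
 * timer) and returns RETURN, in which case the completion path calls
 * back into the router later. A minimal analogue with hypothetical
 * names:
 */
enum ex_rc { EX_JOB_CONTINUE, EX_JOB_RETURN };

struct ex_job {
        enum ex_rc (*step)(struct ex_job *job);
};

static void ex_run_job(struct ex_job *job)
{
        enum ex_rc rc;

        do {
                rc = job->step(job);    /* each step sets job->step to its successor */
        } while (rc == EX_JOB_CONTINUE);
        /* EX_JOB_RETURN: an async completion will call ex_run_job() again */
}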
6909
6910/**
6911 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6912 * @ioa_cfg: ioa config struct
6913 * @job_step: first job step of reset job
6914 * @shutdown_type: shutdown type
6915 *
6916 * Description: This function will initiate the reset of the given adapter
6917 * starting at the selected job step.
6918 * If the caller needs to wait on the completion of the reset,
6919 * the caller must sleep on the reset_wait_q.
6920 *
6921 * Return value:
6922 * none
6923 **/
6924static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6925 int (*job_step) (struct ipr_cmnd *),
6926 enum ipr_shutdown_type shutdown_type)
6927{
6928 struct ipr_cmnd *ipr_cmd;
6929
6930 ioa_cfg->in_reset_reload = 1;
6931 ioa_cfg->allow_cmds = 0;
6932 scsi_block_requests(ioa_cfg->host);
6933
6934 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6935 ioa_cfg->reset_cmd = ipr_cmd;
6936 ipr_cmd->job_step = job_step;
6937 ipr_cmd->u.shutdown_type = shutdown_type;
6938
6939 ipr_reset_ioa_job(ipr_cmd);
6940}
6941
6942/**
6943 * ipr_initiate_ioa_reset - Initiate an adapter reset
6944 * @ioa_cfg: ioa config struct
6945 * @shutdown_type: shutdown type
6946 *
6947 * Description: This function will initiate the reset of the given adapter.
6948 * If the caller needs to wait on the completion of the reset,
6949 * the caller must sleep on the reset_wait_q.
6950 *
6951 * Return value:
6952 * none
6953 **/
6954static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6955 enum ipr_shutdown_type shutdown_type)
6956{
6957 if (ioa_cfg->ioa_is_dead)
6958 return;
6959
6960 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6961 ioa_cfg->sdt_state = ABORT_DUMP;
6962
6963 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6964 dev_err(&ioa_cfg->pdev->dev,
6965 "IOA taken offline - error recovery failed\n");
6966
6967 ioa_cfg->reset_retries = 0;
6968 ioa_cfg->ioa_is_dead = 1;
6969
6970 if (ioa_cfg->in_ioa_bringdown) {
6971 ioa_cfg->reset_cmd = NULL;
6972 ioa_cfg->in_reset_reload = 0;
6973 ipr_fail_all_ops(ioa_cfg);
6974 wake_up_all(&ioa_cfg->reset_wait_q);
6975
6976 spin_unlock_irq(ioa_cfg->host->host_lock);
6977 scsi_unblock_requests(ioa_cfg->host);
6978 spin_lock_irq(ioa_cfg->host->host_lock);
6979 return;
6980 } else {
6981 ioa_cfg->in_ioa_bringdown = 1;
6982 shutdown_type = IPR_SHUTDOWN_NONE;
6983 }
6984 }
6985
6986 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6987 shutdown_type);
6988}
6989
6990/**
6991 * ipr_reset_freeze - Hold off all I/O activity
6992 * @ipr_cmd: ipr command struct
6993 *
6994 * Description: If the PCI slot is frozen, hold off all I/O
6995 * activity; then, as soon as the slot is available again,
6996 * initiate an adapter reset.
6997 */
6998static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6999{
7000 /* Disallow new interrupts, avoid loop */
7001 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7002 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7003 ipr_cmd->done = ipr_reset_ioa_job;
7004 return IPR_RC_JOB_RETURN;
7005}
7006
7007/**
7008 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7009 * @pdev: PCI device struct
7010 *
7011 * Description: This routine is called to tell us that the PCI bus
7012 * is down. Can't do anything here, except put the device driver
7013 * into a holding pattern, waiting for the PCI bus to come back.
7014 */
7015static void ipr_pci_frozen(struct pci_dev *pdev)
7016{
7017 unsigned long flags = 0;
7018 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7019
7020 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7021 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7022 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7023}
7024
7025/**
7026 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7027 * @pdev: PCI device struct
7028 *
7029 * Description: This routine is called by the pci error recovery
7030 * code after the PCI slot has been reset, just before we
7031 * should resume normal operations.
7032 */
7033static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7034{
7035 unsigned long flags = 0;
7036 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7037
7038 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7039 if (ioa_cfg->needs_warm_reset)
7040 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7041 else
7042 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7043 IPR_SHUTDOWN_NONE);
7044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7045 return PCI_ERS_RESULT_RECOVERED;
7046}
7047
7048/**
7049 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7050 * @pdev: PCI device struct
7051 *
7052 * Description: This routine is called when the PCI bus has
7053 * permanently failed.
7054 */
7055static void ipr_pci_perm_failure(struct pci_dev *pdev)
7056{
7057 unsigned long flags = 0;
7058 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7059
7060 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7061 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7062 ioa_cfg->sdt_state = ABORT_DUMP;
7063 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7064 ioa_cfg->in_ioa_bringdown = 1;
7065 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7067}
7068
7069/**
7070 * ipr_pci_error_detected - Called when a PCI error is detected.
7071 * @pdev: PCI device struct
7072 * @state: PCI channel state
7073 *
7074 * Description: Called when a PCI error is detected.
7075 *
7076 * Return value:
7077 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7078 */
7079static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7080 pci_channel_state_t state)
7081{
7082 switch (state) {
7083 case pci_channel_io_frozen:
7084 ipr_pci_frozen(pdev);
7085 return PCI_ERS_RESULT_NEED_RESET;
7086 case pci_channel_io_perm_failure:
7087 ipr_pci_perm_failure(pdev);
7088 return PCI_ERS_RESULT_DISCONNECT;
7090 default:
7091 break;
7092 }
7093 return PCI_ERS_RESULT_NEED_RESET;
7094}
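/*
 * Illustrative sketch (annotation, not original source;
 * example_recover_flow is hypothetical): the order in which the PCI
 * error recovery core invokes the callbacks above for this driver.
 * Simplified; the real sequencing lives in the PCI/EEH core.
 */
#if 0	/* example only */
static void example_recover_flow(struct pci_dev *pdev)
{
	pci_ers_result_t res;

	/* bus error detected while the slot is frozen */
	res = ipr_pci_error_detected(pdev, pci_channel_io_frozen);
	/* res == PCI_ERS_RESULT_NEED_RESET: the core resets the slot, then */
	res = ipr_pci_slot_reset(pdev);
	/* res == PCI_ERS_RESULT_RECOVERED: normal operation resumes */
}
#endif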
7095
7096/**
7097 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7098 * @ioa_cfg: ioa cfg struct
7099 *
 7100 * Description: This is the second phase of adapter initialization.
 7101 * This function takes care of initializing the adapter to the point
 7102 * where it can accept new commands.
 7103 *
 7104 * Return value:
 7105 * 	0 on success / -EIO on failure
7106 **/
7107static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7108{
7109 int rc = 0;
7110 unsigned long host_lock_flags = 0;
7111
7112 ENTER;
7113 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7114 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7115 if (ioa_cfg->needs_hard_reset) {
7116 ioa_cfg->needs_hard_reset = 0;
7117 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7118 } else
7119 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7120 IPR_SHUTDOWN_NONE);
7121
7122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7123 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7124 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7125
7126 if (ioa_cfg->ioa_is_dead) {
7127 rc = -EIO;
7128 } else if (ipr_invalid_adapter(ioa_cfg)) {
7129 if (!ipr_testmode)
7130 rc = -EIO;
7131
7132 dev_err(&ioa_cfg->pdev->dev,
7133 "Adapter not supported in this hardware configuration.\n");
7134 }
7135
7136 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7137
7138 LEAVE;
7139 return rc;
7140}
7141
7142/**
7143 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7144 * @ioa_cfg: ioa config struct
7145 *
7146 * Return value:
7147 * none
7148 **/
7149static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7150{
7151 int i;
7152
7153 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7154 if (ioa_cfg->ipr_cmnd_list[i])
7155 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7156 ioa_cfg->ipr_cmnd_list[i],
7157 ioa_cfg->ipr_cmnd_list_dma[i]);
7158
7159 ioa_cfg->ipr_cmnd_list[i] = NULL;
7160 }
7161
7162 if (ioa_cfg->ipr_cmd_pool)
7163 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7164
7165 ioa_cfg->ipr_cmd_pool = NULL;
7166}
7167
7168/**
7169 * ipr_free_mem - Frees memory allocated for an adapter
7170 * @ioa_cfg: ioa cfg struct
7171 *
7172 * Return value:
7173 * nothing
7174 **/
7175static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7176{
7177 int i;
7178
7179 kfree(ioa_cfg->res_entries);
7180 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7181 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7182 ipr_free_cmd_blks(ioa_cfg);
7183 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7184 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7185 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
7186 ioa_cfg->cfg_table,
7187 ioa_cfg->cfg_table_dma);
7188
7189 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7190 pci_free_consistent(ioa_cfg->pdev,
7191 sizeof(struct ipr_hostrcb),
7192 ioa_cfg->hostrcb[i],
7193 ioa_cfg->hostrcb_dma[i]);
7194 }
7195
7196 ipr_free_dump(ioa_cfg);
7197 kfree(ioa_cfg->trace);
7198}
7199
7200/**
7201 * ipr_free_all_resources - Free all allocated resources for an adapter.
 7202 * @ioa_cfg: ioa config struct
7203 *
7204 * This function frees all allocated resources for the
7205 * specified adapter.
7206 *
7207 * Return value:
7208 * none
7209 **/
7210static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7211{
7212 struct pci_dev *pdev = ioa_cfg->pdev;
7213
7214 ENTER;
7215 free_irq(pdev->irq, ioa_cfg);
7216 iounmap(ioa_cfg->hdw_dma_regs);
7217 pci_release_regions(pdev);
7218 ipr_free_mem(ioa_cfg);
7219 scsi_host_put(ioa_cfg->host);
7220 pci_disable_device(pdev);
7221 LEAVE;
7222}
7223
7224/**
7225 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7226 * @ioa_cfg: ioa config struct
7227 *
7228 * Return value:
7229 * 0 on success / -ENOMEM on allocation failure
7230 **/
7231static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7232{
7233 struct ipr_cmnd *ipr_cmd;
7234 struct ipr_ioarcb *ioarcb;
7235 dma_addr_t dma_addr;
7236 int i;
7237
7238 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7239 sizeof(struct ipr_cmnd), 8, 0);
7240
7241 if (!ioa_cfg->ipr_cmd_pool)
7242 return -ENOMEM;
7243
7244 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
 7245 	ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7246
7247 if (!ipr_cmd) {
7248 ipr_free_cmd_blks(ioa_cfg);
7249 return -ENOMEM;
7250 }
7251
7252 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7253 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7254 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7255
7256 ioarcb = &ipr_cmd->ioarcb;
7257 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
7258 ioarcb->host_response_handle = cpu_to_be32(i << 2);
7259 ioarcb->write_ioadl_addr =
7260 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
7261 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7262 ioarcb->ioasa_host_pci_addr =
7263 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7264 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7265 ipr_cmd->cmd_index = i;
7266 ipr_cmd->ioa_cfg = ioa_cfg;
7267 ipr_cmd->sense_buffer_dma = dma_addr +
7268 offsetof(struct ipr_cmnd, sense_buffer);
7269
7270 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7271 }
7272
7273 return 0;
7274}
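/*
 * Illustrative sketch (annotation, not original source; names here are
 * hypothetical): the addressing pattern used above.  Because each command
 * block comes from a single pci_pool allocation, the bus addresses of
 * members embedded in it (ioadl, ioasa, sense_buffer) can be derived from
 * the block's own bus address with offsetof().
 */
#if 0	/* example only */
struct example_blk {
	u32 header;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
};

static dma_addr_t example_sense_dma(dma_addr_t blk_dma)
{
	/* bus address of 'sense' inside a pool-allocated example_blk */
	return blk_dma + offsetof(struct example_blk, sense);
}
#endif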
7275
7276/**
7277 * ipr_alloc_mem - Allocate memory for an adapter
7278 * @ioa_cfg: ioa config struct
7279 *
7280 * Return value:
7281 * 0 on success / non-zero for error
7282 **/
7283static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7284{
7285 struct pci_dev *pdev = ioa_cfg->pdev;
7286 int i, rc = -ENOMEM;
7287
7288 ENTER;
 7289 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7290 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7291
7292 if (!ioa_cfg->res_entries)
7293 goto out;
7294
7295 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7296 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7297
7298 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7299 sizeof(struct ipr_misc_cbs),
7300 &ioa_cfg->vpd_cbs_dma);
7301
7302 if (!ioa_cfg->vpd_cbs)
7303 goto out_free_res_entries;
7304
7305 if (ipr_alloc_cmd_blks(ioa_cfg))
7306 goto out_free_vpd_cbs;
7307
7308 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7309 sizeof(u32) * IPR_NUM_CMD_BLKS,
7310 &ioa_cfg->host_rrq_dma);
7311
7312 if (!ioa_cfg->host_rrq)
7313 goto out_ipr_free_cmd_blocks;
7314
7315 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7316 sizeof(struct ipr_config_table),
7317 &ioa_cfg->cfg_table_dma);
7318
7319 if (!ioa_cfg->cfg_table)
7320 goto out_free_host_rrq;
7321
7322 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7323 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7324 sizeof(struct ipr_hostrcb),
7325 &ioa_cfg->hostrcb_dma[i]);
7326
7327 if (!ioa_cfg->hostrcb[i])
7328 goto out_free_hostrcb_dma;
7329
7330 ioa_cfg->hostrcb[i]->hostrcb_dma =
7331 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
 7332 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7333 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7334 }
7335
 7336 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7337 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7338
7339 if (!ioa_cfg->trace)
7340 goto out_free_hostrcb_dma;
7341
7342 rc = 0;
7343out:
7344 LEAVE;
7345 return rc;
7346
7347out_free_hostrcb_dma:
7348 while (i-- > 0) {
7349 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7350 ioa_cfg->hostrcb[i],
7351 ioa_cfg->hostrcb_dma[i]);
7352 }
7353 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7354 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7355out_free_host_rrq:
7356 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7357 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7358out_ipr_free_cmd_blocks:
7359 ipr_free_cmd_blks(ioa_cfg);
7360out_free_vpd_cbs:
7361 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7362 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7363out_free_res_entries:
7364 kfree(ioa_cfg->res_entries);
7365 goto out;
7366}
7367
7368/**
7369 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7370 * @ioa_cfg: ioa config struct
7371 *
7372 * Return value:
7373 * none
7374 **/
7375static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7376{
7377 int i;
7378
7379 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7380 ioa_cfg->bus_attr[i].bus = i;
7381 ioa_cfg->bus_attr[i].qas_enabled = 0;
7382 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7383 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7384 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7385 else
7386 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7387 }
7388}
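/*
 * Illustrative sketch (annotation, not original source; table values are
 * hypothetical): the guard used above.  ipr_max_speed is a user-supplied
 * module parameter, so it is validated against
 * ARRAY_SIZE(ipr_max_bus_speeds) before being used as an index;
 * out-of-range values fall back to the U160 rate.
 */
#if 0	/* example only */
static const u32 example_rates[] = { 80, 160, 320 };	/* MB/sec */

static u32 example_rate(unsigned int idx)
{
	if (idx < ARRAY_SIZE(example_rates))
		return example_rates[idx];
	return example_rates[1];	/* safe default */
}
#endif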
7389
7390/**
7391 * ipr_init_ioa_cfg - Initialize IOA config struct
7392 * @ioa_cfg: ioa config struct
7393 * @host: scsi host struct
7394 * @pdev: PCI dev struct
7395 *
7396 * Return value:
7397 * none
7398 **/
7399static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7400 struct Scsi_Host *host, struct pci_dev *pdev)
7401{
7402 const struct ipr_interrupt_offsets *p;
7403 struct ipr_interrupts *t;
7404 void __iomem *base;
7405
7406 ioa_cfg->host = host;
7407 ioa_cfg->pdev = pdev;
7408 ioa_cfg->log_level = ipr_log_level;
 7409 	ioa_cfg->doorbell = IPR_DOORBELL;
7410 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7411 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7412 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7413 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7414 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7415 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7416 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7417 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7418
7419 INIT_LIST_HEAD(&ioa_cfg->free_q);
7420 INIT_LIST_HEAD(&ioa_cfg->pending_q);
7421 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7422 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7423 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7424 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
 7425 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7426 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7427 ioa_cfg->sdt_state = INACTIVE;
7428 if (ipr_enable_cache)
7429 ioa_cfg->cache_state = CACHE_ENABLED;
7430 else
7431 ioa_cfg->cache_state = CACHE_DISABLED;
7432
7433 ipr_initialize_bus_attr(ioa_cfg);
7434
7435 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7436 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7437 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7438 host->unique_id = host->host_no;
7439 host->max_cmd_len = IPR_MAX_CDB_LEN;
7440 pci_set_drvdata(pdev, ioa_cfg);
7441
7442 p = &ioa_cfg->chip_cfg->regs;
7443 t = &ioa_cfg->regs;
7444 base = ioa_cfg->hdw_dma_regs;
7445
7446 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7447 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7448 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7449 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7450 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7451 t->ioarrin_reg = base + p->ioarrin_reg;
7452 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7453 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7454 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7455}
7456
7457/**
7458 * ipr_get_chip_cfg - Find adapter chip configuration
7459 * @dev_id: PCI device id struct
7460 *
7461 * Return value:
7462 * ptr to chip config on success / NULL on failure
7463 **/
7464static const struct ipr_chip_cfg_t * __devinit
7465ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7466{
7467 int i;
7468
7469 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7470 if (ipr_chip[i].vendor == dev_id->vendor &&
7471 ipr_chip[i].device == dev_id->device)
7472 return ipr_chip[i].cfg;
7473 return NULL;
7474}
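/*
 * Illustrative sketch (annotation, not original source; the struct shape
 * below is hypothetical): the lookup performed above.  ipr_chip[], defined
 * earlier in this file, pairs PCI vendor/device IDs with a chip
 * configuration describing register offsets and cache line size; probe
 * fails if no entry matches.
 */
#if 0	/* example only */
struct example_chip_entry {
	u16 vendor;
	u16 device;
	const struct ipr_chip_cfg_t *cfg;
};
#endif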
7475
7476/**
7477 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7478 * @pdev: PCI device struct
7479 * @dev_id: PCI device id struct
7480 *
7481 * Return value:
7482 * 0 on success / non-zero on failure
7483 **/
7484static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7485 const struct pci_device_id *dev_id)
7486{
7487 struct ipr_ioa_cfg *ioa_cfg;
7488 struct Scsi_Host *host;
7489 unsigned long ipr_regs_pci;
7490 void __iomem *ipr_regs;
 7491 	int rc = PCIBIOS_SUCCESSFUL;
 7492 	volatile u32 mask, uproc, interrupts;
7493
7494 ENTER;
7495
7496 if ((rc = pci_enable_device(pdev))) {
7497 dev_err(&pdev->dev, "Cannot enable adapter\n");
7498 goto out;
7499 }
7500
7501 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7502
7503 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7504
7505 if (!host) {
7506 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7507 rc = -ENOMEM;
7508 goto out_disable;
7509 }
7510
7511 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7512 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7513 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7514 sata_port_info.flags, &ipr_sata_ops);
7515
7516 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7517
7518 if (!ioa_cfg->chip_cfg) {
7519 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7520 dev_id->vendor, dev_id->device);
7521 goto out_scsi_host_put;
7522 }
7523
7524 if (ipr_transop_timeout)
7525 ioa_cfg->transop_timeout = ipr_transop_timeout;
7526 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7527 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7528 else
7529 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7530
7531 rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &ioa_cfg->revid);
7532
7533 if (rc != PCIBIOS_SUCCESSFUL) {
7534 dev_err(&pdev->dev, "Failed to read PCI revision ID\n");
7535 rc = -EIO;
7536 goto out_scsi_host_put;
7537 }
7538
7539 ipr_regs_pci = pci_resource_start(pdev, 0);
7540
7541 rc = pci_request_regions(pdev, IPR_NAME);
7542 if (rc < 0) {
7543 dev_err(&pdev->dev,
7544 "Couldn't register memory range of registers\n");
7545 goto out_scsi_host_put;
7546 }
7547
7548 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7549
7550 if (!ipr_regs) {
7551 dev_err(&pdev->dev,
7552 "Couldn't map memory range of registers\n");
7553 rc = -ENOMEM;
7554 goto out_release_regions;
7555 }
7556
7557 ioa_cfg->hdw_dma_regs = ipr_regs;
7558 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7559 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7560
7561 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7562
7563 pci_set_master(pdev);
7564
7565 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7566 if (rc < 0) {
7567 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7568 goto cleanup_nomem;
7569 }
7570
7571 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7572 ioa_cfg->chip_cfg->cache_line_size);
7573
7574 if (rc != PCIBIOS_SUCCESSFUL) {
7575 dev_err(&pdev->dev, "Write of cache line size failed\n");
7576 rc = -EIO;
7577 goto cleanup_nomem;
7578 }
7579
7580 /* Save away PCI config space for use following IOA reset */
7581 rc = pci_save_state(pdev);
7582
7583 if (rc != PCIBIOS_SUCCESSFUL) {
7584 dev_err(&pdev->dev, "Failed to save PCI config space\n");
7585 rc = -EIO;
7586 goto cleanup_nomem;
7587 }
7588
7589 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7590 goto cleanup_nomem;
7591
7592 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7593 goto cleanup_nomem;
7594
7595 rc = ipr_alloc_mem(ioa_cfg);
7596 if (rc < 0) {
7597 dev_err(&pdev->dev,
7598 "Couldn't allocate enough memory for device driver!\n");
7599 goto cleanup_nomem;
7600 }
7601
7602 /*
7603 * If HRRQ updated interrupt is not masked, or reset alert is set,
7604 * the card is in an unknown state and needs a hard reset
7605 */
7606 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 7607 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
7608 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7609 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7610 ioa_cfg->needs_hard_reset = 1;
7611 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
7612 ioa_cfg->needs_hard_reset = 1;
7613 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
7614 ioa_cfg->ioa_unit_checked = 1;
 7615
 7616 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 7617 	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7618
7619 if (rc) {
7620 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7621 pdev->irq, rc);
7622 goto cleanup_nolog;
7623 }
7624
7625 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
7626 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
7627 ioa_cfg->needs_warm_reset = 1;
7628 ioa_cfg->reset = ipr_reset_slot_reset;
7629 } else
7630 ioa_cfg->reset = ipr_reset_start_bist;
7631
7632 spin_lock(&ipr_driver_lock);
7633 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7634 spin_unlock(&ipr_driver_lock);
7635
7636 LEAVE;
7637out:
7638 return rc;
7639
7640cleanup_nolog:
7641 ipr_free_mem(ioa_cfg);
7642cleanup_nomem:
7643 iounmap(ipr_regs);
7644out_release_regions:
7645 pci_release_regions(pdev);
7646out_scsi_host_put:
7647 scsi_host_put(host);
7648out_disable:
7649 pci_disable_device(pdev);
7650 goto out;
7651}
7652
7653/**
7654 * ipr_scan_vsets - Scans for VSET devices
7655 * @ioa_cfg: ioa config struct
7656 *
 7657 * Description: Since the VSET resources do not follow SAM (we can have
 7658 * sparse LUNs with no LUN 0), we have to scan for these devices ourselves.
7659 *
7660 * Return value:
7661 * none
7662 **/
7663static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7664{
7665 int target, lun;
7666
7667 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
 7668 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
7669 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7670}
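/*
 * Note (annotation, not original source): SAM-conformant targets are
 * expected to respond at LUN 0, which is what the midlayer's standard
 * scan relies on.  A VSET may expose, say, only LUN 3 at a target, so
 * the nested loop above probes every target/LUN pair on the VSET bus
 * explicitly instead of relying on the midlayer scan.
 */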
7671
7672/**
7673 * ipr_initiate_ioa_bringdown - Bring down an adapter
7674 * @ioa_cfg: ioa config struct
7675 * @shutdown_type: shutdown type
7676 *
7677 * Description: This function will initiate bringing down the adapter.
7678 * This consists of issuing an IOA shutdown to the adapter
7679 * to flush the cache, and running BIST.
7680 * If the caller needs to wait on the completion of the reset,
7681 * the caller must sleep on the reset_wait_q.
7682 *
7683 * Return value:
7684 * none
7685 **/
7686static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7687 enum ipr_shutdown_type shutdown_type)
7688{
7689 ENTER;
7690 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7691 ioa_cfg->sdt_state = ABORT_DUMP;
7692 ioa_cfg->reset_retries = 0;
7693 ioa_cfg->in_ioa_bringdown = 1;
7694 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7695 LEAVE;
7696}
7697
7698/**
7699 * __ipr_remove - Remove a single adapter
7700 * @pdev: pci device struct
7701 *
7702 * Adapter hot plug remove entry point.
7703 *
7704 * Return value:
7705 * none
7706 **/
7707static void __ipr_remove(struct pci_dev *pdev)
7708{
7709 unsigned long host_lock_flags = 0;
7710 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7711 ENTER;
7712
7713 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 7714 	while (ioa_cfg->in_reset_reload) {
7715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7716 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7717 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7718 }
7719
7720 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7721
7722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7723 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 7724 	flush_scheduled_work();
7725 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7726
7727 spin_lock(&ipr_driver_lock);
7728 list_del(&ioa_cfg->queue);
7729 spin_unlock(&ipr_driver_lock);
7730
7731 if (ioa_cfg->sdt_state == ABORT_DUMP)
7732 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7734
7735 ipr_free_all_resources(ioa_cfg);
7736
7737 LEAVE;
7738}
7739
7740/**
7741 * ipr_remove - IOA hot plug remove entry point
7742 * @pdev: pci device struct
7743 *
7744 * Adapter hot plug remove entry point.
7745 *
7746 * Return value:
7747 * none
7748 **/
7749static void ipr_remove(struct pci_dev *pdev)
7750{
7751 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7752
7753 ENTER;
7754
7755 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7756 &ipr_trace_attr);
7757 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7758 &ipr_dump_attr);
7759 scsi_remove_host(ioa_cfg->host);
7760
7761 __ipr_remove(pdev);
7762
7763 LEAVE;
7764}
7765
7766/**
7767 * ipr_probe - Adapter hot plug add entry point
 7768 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
7769 * Return value:
7770 * 0 on success / non-zero on failure
7771 **/
7772static int __devinit ipr_probe(struct pci_dev *pdev,
7773 const struct pci_device_id *dev_id)
7774{
7775 struct ipr_ioa_cfg *ioa_cfg;
7776 int rc;
7777
7778 rc = ipr_probe_ioa(pdev, dev_id);
7779
7780 if (rc)
7781 return rc;
7782
7783 ioa_cfg = pci_get_drvdata(pdev);
7784 rc = ipr_probe_ioa_part2(ioa_cfg);
7785
7786 if (rc) {
7787 __ipr_remove(pdev);
7788 return rc;
7789 }
7790
7791 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7792
7793 if (rc) {
7794 __ipr_remove(pdev);
7795 return rc;
7796 }
7797
7798 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7799 &ipr_trace_attr);
7800
7801 if (rc) {
7802 scsi_remove_host(ioa_cfg->host);
7803 __ipr_remove(pdev);
7804 return rc;
7805 }
7806
7807 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7808 &ipr_dump_attr);
7809
7810 if (rc) {
7811 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7812 &ipr_trace_attr);
7813 scsi_remove_host(ioa_cfg->host);
7814 __ipr_remove(pdev);
7815 return rc;
7816 }
7817
7818 scsi_scan_host(ioa_cfg->host);
7819 ipr_scan_vsets(ioa_cfg);
7820 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7821 ioa_cfg->allow_ml_add_del = 1;
 7822 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
7823 schedule_work(&ioa_cfg->work_q);
7824 return 0;
7825}
7826
7827/**
7828 * ipr_shutdown - Shutdown handler.
 7829 * @pdev: pci device struct
7830 *
7831 * This function is invoked upon system shutdown/reboot. It will issue
7832 * an adapter shutdown to the adapter to flush the write cache.
7833 *
7834 * Return value:
7835 * none
7836 **/
 7837static void ipr_shutdown(struct pci_dev *pdev)
 7838{
 7839 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7840 unsigned long lock_flags = 0;
7841
7842 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 7843 	while (ioa_cfg->in_reset_reload) {
7844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7845 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7846 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7847 }
7848
7849 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7851 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7852}
7853
7854static struct pci_device_id ipr_pci_table[] __devinitdata = {
7855 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
 7856 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
 7857 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
 7858 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
 7859 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
 7860 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
 7861 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
 7862 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
 7863 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
 7864 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
 7865 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
 7866 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
 7867 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
 7868 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
 7869 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
 7870 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
 7871 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7872 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 7873 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
 7874 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 7875 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
 7876 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7877 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 7878 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
 7879 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7880 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 7881 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
 7882 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 7883 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
 7884 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7885 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 7886 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
 7887 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7888 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 7889 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
 7890 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7891 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 7892 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
 7893 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7894 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 7895 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
 7896 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 7897 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
 7898 	  IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
 7899 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
 7900 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
 7901 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
 7902 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
 7903 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
 7904 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
 7905 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7906 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
 7907 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
 7908 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
 7909 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
 7910 	  PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
 7911 	  IPR_USE_LONG_TRANSOP_TIMEOUT },
7912 { }
7913};
7914MODULE_DEVICE_TABLE(pci, ipr_pci_table);
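/*
 * Note (annotation, not original source): MODULE_DEVICE_TABLE() exports
 * the table above so depmod/udev can autoload this module when matching
 * hardware appears.  Each entry matches on vendor/device plus the IBM
 * subsystem IDs, and the final field becomes dev_id->driver_data in
 * ipr_probe_ioa(), carrying the IPR_USE_* behaviour flags tested there.
 */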
7915
7916static struct pci_error_handlers ipr_err_handler = {
7917 .error_detected = ipr_pci_error_detected,
7918 .slot_reset = ipr_pci_slot_reset,
7919};
7920
7921static struct pci_driver ipr_driver = {
7922 .name = IPR_NAME,
7923 .id_table = ipr_pci_table,
7924 .probe = ipr_probe,
7925 .remove = ipr_remove,
 7926 	.shutdown = ipr_shutdown,
 7927 	.err_handler = &ipr_err_handler,
 7928 	.dynids.use_driver_data = 1
7929};
7930
7931/**
7932 * ipr_init - Module entry point
7933 *
7934 * Return value:
7935 * 0 on success / negative value on failure
7936 **/
7937static int __init ipr_init(void)
7938{
7939 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7940 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7941
 7942 	return pci_register_driver(&ipr_driver);
7943}
7944
7945/**
7946 * ipr_exit - Module unload
7947 *
7948 * Module unload entry point.
7949 *
7950 * Return value:
7951 * none
7952 **/
7953static void __exit ipr_exit(void)
7954{
7955 pci_unregister_driver(&ipr_driver);
7956}
7957
7958module_init(ipr_init);
7959module_exit(ipr_exit);
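
/*
 * Usage sketch (annotation, not original source; the parameter names
 * assume the module_param declarations earlier in this file -- verify
 * against your kernel before use):
 *
 *   modprobe ipr transop_timeout=120 enable_cache=0
 *
 * would load the driver with a 120 second transition-to-operational
 * timeout and the adapter write cache disabled.
 */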