/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

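/*
 * Illustrative helper (not part of the original driver): the probe path
 * resolves a PCI function to its register layout by scanning ipr_chip[]
 * for a matching vendor/device pair.  The .vendor/.device/.cfg field
 * names are assumed from the initializers above.
 */
static inline const struct ipr_chip_cfg_t *
ipr_lookup_chip_cfg(u16 vendor, u16 device)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == vendor &&
		    ipr_chip[i].device == device)
			return ipr_chip[i].cfg;

	return NULL;	/* unknown adapter */
}
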
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

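/*
 * Usage example (illustrative): all parameters above are registered with
 * permission bits of 0, so they do not appear in sysfs and can only be
 * set at module load time, e.g.:
 *
 *	modprobe ipr max_speed=1 log_level=4 enable_cache=0
 */
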
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

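/*
 * Each entry above is { IOASC, log_ioasa, log_hcam, message }: the IOASC
 * being matched, whether the raw IOASA should be dumped, the log level
 * gating the HCAM message, and the URC text (see struct
 * ipr_error_table_t in ipr.h for the field definitions).
 */
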
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

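/*
 * Note (editorial): ipr_trc_hook() never bounds-checks trace_index; it
 * assumes the index field (declared in ipr.h) is sized so that the
 * post-increment wraps at the end of the trace array, making
 * ioa_cfg->trace a circular buffer of recent command start/finish events.
 */
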
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
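	/* Read back to flush the posted writes to the adapter */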
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

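	/* Order the IOARCB updates in memory before the MMIO doorbell write */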
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

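/*
 * Usage sketch (illustrative, compiled out): an internally generated
 * blocking command follows the pattern below.  The caller holds the host
 * lock, which ipr_send_blocking_cmd() drops while it sleeps; the opcode
 * shown is hypothetical.
 */
#if 0
	struct ipr_cmnd *ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = 0xC6;	/* hypothetical opcode */
	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
#endif
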
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

1da177e4
LT
1094/**
1095 * ipr_log_config_error - Log a configuration error.
1096 * @ioa_cfg: ioa config struct
1097 * @hostrcb: hostrcb struct
1098 *
1099 * Return value:
1100 * none
1101 **/
1102static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1103 struct ipr_hostrcb *hostrcb)
1104{
1105 int errors_logged, i;
1106 struct ipr_hostrcb_device_data_entry *dev_entry;
1107 struct ipr_hostrcb_type_03_error *error;
1108
1109 error = &hostrcb->hcam.u.error.u.type_03_error;
1110 errors_logged = be32_to_cpu(error->errors_logged);
1111
1112 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1113 be32_to_cpu(error->errors_detected), errors_logged);
1114
cfc32139 1115 dev_entry = error->dev;
1da177e4
LT
1116
1117 for (i = 0; i < errors_logged; i++, dev_entry++) {
1118 ipr_err_separator;
1119
fa15b1f6 1120 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1121 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1122
1123 ipr_err("-----New Device Information-----\n");
cfc32139 1124 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1125
1126 ipr_err("Cache Directory Card Information:\n");
cfc32139 1127 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1128
1129 ipr_err("Adapter Card Information:\n");
cfc32139 1130 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1131
1132 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1133 be32_to_cpu(dev_entry->ioa_data[0]),
1134 be32_to_cpu(dev_entry->ioa_data[1]),
1135 be32_to_cpu(dev_entry->ioa_data[2]),
1136 be32_to_cpu(dev_entry->ioa_data[3]),
1137 be32_to_cpu(dev_entry->ioa_data[4]));
1138 }
1139}
1140
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

1575/**
1576 * ipr_handle_log_data - Log an adapter error.
1577 * @ioa_cfg: ioa config struct
1578 * @hostrcb: hostrcb struct
1579 *
1580 * This function logs an adapter error to the system.
1581 *
1582 * Return value:
1583 * none
1584 **/
1585static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1586 struct ipr_hostrcb *hostrcb)
1587{
1588 u32 ioasc;
1589 int error_index;
1590
1591 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1592 return;
1593
1594 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1595 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1596
1597 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1598
1599 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1600 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1601 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1602 scsi_report_bus_reset(ioa_cfg->host,
1603 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1604 }
1605
1606 error_index = ipr_get_error(ioasc);
1607
1608 if (!ipr_error_table[error_index].log_hcam)
1609 return;
1610
1611 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1612
1613 /* Set indication we have logged an error */
1614 ioa_cfg->errors_logged++;
1615
1616 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1617 return;
1618 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1619 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1620
1621 switch (hostrcb->hcam.overlay_id) {
1622 case IPR_HOST_RCB_OVERLAY_ID_2:
1623 ipr_log_cache_error(ioa_cfg, hostrcb);
1624 break;
1625 case IPR_HOST_RCB_OVERLAY_ID_3:
1626 ipr_log_config_error(ioa_cfg, hostrcb);
1627 break;
1628 case IPR_HOST_RCB_OVERLAY_ID_4:
1629 case IPR_HOST_RCB_OVERLAY_ID_6:
1630 ipr_log_array_error(ioa_cfg, hostrcb);
1631 break;
1632 case IPR_HOST_RCB_OVERLAY_ID_7:
1633 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1634 break;
1635 case IPR_HOST_RCB_OVERLAY_ID_12:
1636 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1637 break;
1638 case IPR_HOST_RCB_OVERLAY_ID_13:
1639 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1640 break;
1641 case IPR_HOST_RCB_OVERLAY_ID_14:
1642 case IPR_HOST_RCB_OVERLAY_ID_16:
1643 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1644 break;
1645 case IPR_HOST_RCB_OVERLAY_ID_17:
1646 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1647 break;
1648 case IPR_HOST_RCB_OVERLAY_ID_20:
1649 ipr_log_fabric_error(ioa_cfg, hostrcb);
1650 break;
1651 case IPR_HOST_RCB_OVERLAY_ID_1:
1652 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1653 default:
1654 ipr_log_generic_error(ioa_cfg, hostrcb);
1655 break;
1656 }
1657}
1658
1659/**
1660 * ipr_process_error - Op done function for an adapter error log.
1661 * @ipr_cmd: ipr command struct
1662 *
1663 * This function is the op done function for an error log host
1664 * controlled async from the adapter. It will log the error and
1665 * send the HCAM back to the adapter.
1666 *
1667 * Return value:
1668 * none
1669 **/
1670static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1671{
1672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1673 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1674 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1675
1676 list_del(&hostrcb->queue);
1677 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1678
1679 if (!ioasc) {
1680 ipr_handle_log_data(ioa_cfg, hostrcb);
1681 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1682 dev_err(&ioa_cfg->pdev->dev,
1683 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1684 }
1685
1686 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1687}
1688
1689/**
1690 * ipr_timeout - An internally generated op has timed out.
1691 * @ipr_cmd: ipr command struct
1692 *
1693 * This function blocks host requests and initiates an
1694 * adapter reset.
1695 *
1696 * Return value:
1697 * none
1698 **/
1699static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1700{
1701 unsigned long lock_flags = 0;
1702 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1703
1704 ENTER;
1705 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1706
1707 ioa_cfg->errors_logged++;
1708 dev_err(&ioa_cfg->pdev->dev,
1709 "Adapter being reset due to command timeout.\n");
1710
1711 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1712 ioa_cfg->sdt_state = GET_DUMP;
1713
1714 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1715 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1716
1717 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1718 LEAVE;
1719}
1720
1721/**
1722 * ipr_oper_timeout - Adapter timed out transitioning to operational
1723 * @ipr_cmd: ipr command struct
1724 *
1725 * This function blocks host requests and initiates an
1726 * adapter reset.
1727 *
1728 * Return value:
1729 * none
1730 **/
1731static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1732{
1733 unsigned long lock_flags = 0;
1734 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1735
1736 ENTER;
1737 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1738
1739 ioa_cfg->errors_logged++;
1740 dev_err(&ioa_cfg->pdev->dev,
1741 "Adapter timed out transitioning to operational.\n");
1742
1743 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1744 ioa_cfg->sdt_state = GET_DUMP;
1745
1746 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1747 if (ipr_fastfail)
1748 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1749 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1750 }
1751
1752 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1753 LEAVE;
1754}
1755
1756/**
1757 * ipr_reset_reload - Reset/Reload the IOA
1758 * @ioa_cfg: ioa config struct
1759 * @shutdown_type: shutdown type
1760 *
1761 * This function resets the adapter and re-initializes it.
1762 * This function assumes that all new host commands have been stopped.
1763 * Return value:
1764 * SUCCESS / FAILED
1765 **/
1766static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1767 enum ipr_shutdown_type shutdown_type)
1768{
1769 if (!ioa_cfg->in_reset_reload)
1770 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1771
1772 spin_unlock_irq(ioa_cfg->host->host_lock);
1773 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1774 spin_lock_irq(ioa_cfg->host->host_lock);
1775
1776 /* If a host reset hit us while we were already resetting
1777 the adapter and that reset failed, report the failure. */
1778 if (ioa_cfg->ioa_is_dead) {
1779 ipr_trace;
1780 return FAILED;
1781 }
1782
1783 return SUCCESS;
1784}
1785
1786/**
1787 * ipr_find_ses_entry - Find matching SES in SES table
1788 * @res: resource entry struct of SES
1789 *
1790 * Return value:
1791 * pointer to SES table entry / NULL on failure
1792 **/
1793static const struct ipr_ses_table_entry *
1794ipr_find_ses_entry(struct ipr_resource_entry *res)
1795{
1796 int i, j, matches;
1797 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1798
1799 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1800 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1801 if (ste->compare_product_id_byte[j] == 'X') {
1802 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1803 matches++;
1804 else
1805 break;
1806 } else
1807 matches++;
1808 }
1809
1810 if (matches == IPR_PROD_ID_LEN)
1811 return ste;
1812 }
1813
1814 return NULL;
1815}
1816
1817/**
1818 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1819 * @ioa_cfg: ioa config struct
1820 * @bus: SCSI bus
1821 * @bus_width: bus width
1822 *
1823 * Return value:
1824 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1825 * For a 2-byte wide SCSI bus, the maximum data rate in bytes is
1826 * twice the transfer rate (e.g. for a wide enabled bus,
1827 * max 160MHz = max 320MB/sec).
1828 **/
1829static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1830{
1831 struct ipr_resource_entry *res;
1832 const struct ipr_ses_table_entry *ste;
1833 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1834
1835 /* Loop through each config table entry in the config table buffer */
1836 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1837 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1838 continue;
1839
1840 if (bus != res->cfgte.res_addr.bus)
1841 continue;
1842
1843 if (!(ste = ipr_find_ses_entry(res)))
1844 continue;
1845
1846 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1847 }
1848
1849 return max_xfer_rate;
1850}
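/*
 * Worked example (illustrative, values hypothetical): for a 16-bit
 * wide bus and an SES entry with max_bus_speed_limit = 320 (MB/sec),
 * the loop above yields (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz in
 * the 100KHz units this function returns.
 */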
1851
1852/**
1853 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1854 * @ioa_cfg: ioa config struct
1855 * @max_delay: max delay in micro-seconds to wait
1856 *
1857 * Busy-waits for an IODEBUG ACK from the IOA.
1858 *
1859 * Return value:
1860 * 0 on success / other on failure
1861 **/
1862static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1863{
1864 volatile u32 pcii_reg;
1865 int delay = 1;
1866
1867 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1868 while (delay < max_delay) {
1869 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1870
1871 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1872 return 0;
1873
1874 /* udelay cannot be used if delay is more than a few milliseconds */
1875 if ((delay / 1000) > MAX_UDELAY_MS)
1876 mdelay(delay / 1000);
1877 else
1878 udelay(delay);
1879
1880 delay += delay;
1881 }
1882 return -EIO;
1883}
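/*
 * Note on the polling loop above (illustrative): delay doubles every
 * pass (1, 2, 4, ... microseconds), so the worst-case time spent
 * polling before returning -EIO is bounded by roughly 2 * max_delay.
 */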
1884
1885/**
1886 * ipr_get_ldump_data_section - Dump IOA memory
1887 * @ioa_cfg: ioa config struct
1888 * @start_addr: adapter address to dump
1889 * @dest: destination kernel buffer
1890 * @length_in_words: length to dump in 4 byte words
1891 *
1892 * Return value:
1893 * 0 on success / -EIO on failure
1894 **/
1895static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1896 u32 start_addr,
1897 __be32 *dest, u32 length_in_words)
1898{
1899 volatile u32 temp_pcii_reg;
1900 int i, delay = 0;
1901
1902 /* Write IOA interrupt reg starting LDUMP state */
1903 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1904 ioa_cfg->regs.set_uproc_interrupt_reg);
1905
1906 /* Wait for IO debug acknowledge */
1907 if (ipr_wait_iodbg_ack(ioa_cfg,
1908 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1909 dev_err(&ioa_cfg->pdev->dev,
1910 "IOA dump long data transfer timeout\n");
1911 return -EIO;
1912 }
1913
1914 /* Signal LDUMP interlocked - clear IO debug ack */
1915 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1916 ioa_cfg->regs.clr_interrupt_reg);
1917
1918 /* Write Mailbox with starting address */
1919 writel(start_addr, ioa_cfg->ioa_mailbox);
1920
1921 /* Signal address valid - clear IOA Reset alert */
1922 writel(IPR_UPROCI_RESET_ALERT,
1923 ioa_cfg->regs.clr_uproc_interrupt_reg);
1924
1925 for (i = 0; i < length_in_words; i++) {
1926 /* Wait for IO debug acknowledge */
1927 if (ipr_wait_iodbg_ack(ioa_cfg,
1928 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1929 dev_err(&ioa_cfg->pdev->dev,
1930 "IOA dump short data transfer timeout\n");
1931 return -EIO;
1932 }
1933
1934 /* Read data from mailbox and increment destination pointer */
1935 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1936 dest++;
1937
1938 /* For all but the last word of data, signal data received */
1939 if (i < (length_in_words - 1)) {
1940 /* Signal dump data received - Clear IO debug Ack */
1941 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1942 ioa_cfg->regs.clr_interrupt_reg);
1943 }
1944 }
1945
1946 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1947 writel(IPR_UPROCI_RESET_ALERT,
1948 ioa_cfg->regs.set_uproc_interrupt_reg);
1949
1950 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1951 ioa_cfg->regs.clr_uproc_interrupt_reg);
1952
1953 /* Signal dump data received - Clear IO debug Ack */
1954 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1955 ioa_cfg->regs.clr_interrupt_reg);
1956
1957 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1958 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1959 temp_pcii_reg =
1960 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1961
1962 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1963 return 0;
1964
1965 udelay(10);
1966 delay += 10;
1967 }
1968
1969 return 0;
1970}
1971
1972#ifdef CONFIG_SCSI_IPR_DUMP
1973/**
1974 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1975 * @ioa_cfg: ioa config struct
1976 * @pci_address: adapter address
1977 * @length: length of data to copy
1978 *
1979 * Copy data from PCI adapter to kernel buffer.
1980 * Note: length MUST be a 4 byte multiple
1981 * Return value:
1982 * 0 on success / other on failure
1983 **/
1984static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1985 unsigned long pci_address, u32 length)
1986{
1987 int bytes_copied = 0;
1988 int cur_len, rc, rem_len, rem_page_len;
1989 __be32 *page;
1990 unsigned long lock_flags = 0;
1991 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1992
1993 while (bytes_copied < length &&
1994 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1995 if (ioa_dump->page_offset >= PAGE_SIZE ||
1996 ioa_dump->page_offset == 0) {
1997 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1998
1999 if (!page) {
2000 ipr_trace;
2001 return bytes_copied;
2002 }
2003
2004 ioa_dump->page_offset = 0;
2005 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2006 ioa_dump->next_page_index++;
2007 } else
2008 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2009
2010 rem_len = length - bytes_copied;
2011 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2012 cur_len = min(rem_len, rem_page_len);
2013
2014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2015 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2016 rc = -EIO;
2017 } else {
2018 rc = ipr_get_ldump_data_section(ioa_cfg,
2019 pci_address + bytes_copied,
2020 &page[ioa_dump->page_offset / 4],
2021 (cur_len / sizeof(u32)));
2022 }
2023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2024
2025 if (!rc) {
2026 ioa_dump->page_offset += cur_len;
2027 bytes_copied += cur_len;
2028 } else {
2029 ipr_trace;
2030 break;
2031 }
2032 schedule();
2033 }
2034
2035 return bytes_copied;
2036}
2037
2038/**
2039 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2040 * @hdr: dump entry header struct
2041 *
2042 * Return value:
2043 * nothing
2044 **/
2045static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2046{
2047 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2048 hdr->num_elems = 1;
2049 hdr->offset = sizeof(*hdr);
2050 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2051}
2052
2053/**
2054 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2055 * @ioa_cfg: ioa config struct
2056 * @driver_dump: driver dump struct
2057 *
2058 * Return value:
2059 * nothing
2060 **/
2061static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2062 struct ipr_driver_dump *driver_dump)
2063{
2064 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2065
2066 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2067 driver_dump->ioa_type_entry.hdr.len =
2068 sizeof(struct ipr_dump_ioa_type_entry) -
2069 sizeof(struct ipr_dump_entry_header);
2070 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2071 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2072 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2073 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2074 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2075 ucode_vpd->minor_release[1];
2076 driver_dump->hdr.num_entries++;
2077}
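/*
 * Worked example (hypothetical VPD values): with major_release 0x02,
 * card_type 0x5A and minor_release { 0x03, 0x01 }, the packing above
 * gives fw_version = (0x02 << 24) | (0x5A << 16) | (0x03 << 8) | 0x01
 * = 0x025A0301, matching the layout printed by the fw_version sysfs
 * attribute further below.
 */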
2078
2079/**
2080 * ipr_dump_version_data - Fill in the driver version in the dump.
2081 * @ioa_cfg: ioa config struct
2082 * @driver_dump: driver dump struct
2083 *
2084 * Return value:
2085 * nothing
2086 **/
2087static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2088 struct ipr_driver_dump *driver_dump)
2089{
2090 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2091 driver_dump->version_entry.hdr.len =
2092 sizeof(struct ipr_dump_version_entry) -
2093 sizeof(struct ipr_dump_entry_header);
2094 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2095 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2096 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2097 driver_dump->hdr.num_entries++;
2098}
2099
2100/**
2101 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2102 * @ioa_cfg: ioa config struct
2103 * @driver_dump: driver dump struct
2104 *
2105 * Return value:
2106 * nothing
2107 **/
2108static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2109 struct ipr_driver_dump *driver_dump)
2110{
2111 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2112 driver_dump->trace_entry.hdr.len =
2113 sizeof(struct ipr_dump_trace_entry) -
2114 sizeof(struct ipr_dump_entry_header);
2115 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2116 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2117 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2118 driver_dump->hdr.num_entries++;
2119}
2120
2121/**
2122 * ipr_dump_location_data - Fill in the IOA location in the dump.
2123 * @ioa_cfg: ioa config struct
2124 * @driver_dump: driver dump struct
2125 *
2126 * Return value:
2127 * nothing
2128 **/
2129static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2130 struct ipr_driver_dump *driver_dump)
2131{
2132 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2133 driver_dump->location_entry.hdr.len =
2134 sizeof(struct ipr_dump_location_entry) -
2135 sizeof(struct ipr_dump_entry_header);
2136 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2137 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2138 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2139 driver_dump->hdr.num_entries++;
2140}
2141
2142/**
2143 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2144 * @ioa_cfg: ioa config struct
2145 * @dump: dump struct
2146 *
2147 * Return value:
2148 * nothing
2149 **/
2150static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2151{
2152 unsigned long start_addr, sdt_word;
2153 unsigned long lock_flags = 0;
2154 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2155 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2156 u32 num_entries, start_off, end_off;
2157 u32 bytes_to_copy, bytes_copied, rc;
2158 struct ipr_sdt *sdt;
2159 int i;
2160
2161 ENTER;
2162
2163 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2164
2165 if (ioa_cfg->sdt_state != GET_DUMP) {
2166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2167 return;
2168 }
2169
2170 start_addr = readl(ioa_cfg->ioa_mailbox);
2171
2172 if (!ipr_sdt_is_fmt2(start_addr)) {
2173 dev_err(&ioa_cfg->pdev->dev,
2174 "Invalid dump table format: %lx\n", start_addr);
2175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2176 return;
2177 }
2178
2179 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2180
2181 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2182
2183 /* Initialize the overall dump header */
2184 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2185 driver_dump->hdr.num_entries = 1;
2186 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2187 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2188 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2189 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2190
2191 ipr_dump_version_data(ioa_cfg, driver_dump);
2192 ipr_dump_location_data(ioa_cfg, driver_dump);
2193 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2194 ipr_dump_trace_data(ioa_cfg, driver_dump);
2195
2196 /* Update dump_header */
2197 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2198
2199 /* IOA Dump entry */
2200 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2201 ioa_dump->format = IPR_SDT_FMT2;
2202 ioa_dump->hdr.len = 0;
2203 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2204 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2205
2206 /* First entries in sdt are actually a list of dump addresses and
2207 lengths to gather the real dump data. sdt represents the pointer
2208 to the ioa generated dump table. Dump data will be extracted based
2209 on entries in this table */
2210 sdt = &ioa_dump->sdt;
2211
2212 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2213 sizeof(struct ipr_sdt) / sizeof(__be32));
2214
2215 /* Verify the Smart Dump table is ready to use and the first entry is valid */
2216 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2217 dev_err(&ioa_cfg->pdev->dev,
2218 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2219 rc, be32_to_cpu(sdt->hdr.state));
2220 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2221 ioa_cfg->sdt_state = DUMP_OBTAINED;
2222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2223 return;
2224 }
2225
2226 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2227
2228 if (num_entries > IPR_NUM_SDT_ENTRIES)
2229 num_entries = IPR_NUM_SDT_ENTRIES;
2230
2231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2232
2233 for (i = 0; i < num_entries; i++) {
2234 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2235 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2236 break;
2237 }
2238
2239 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2240 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2241 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2242 end_off = be32_to_cpu(sdt->entry[i].end_offset);
2243
2244 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2245 bytes_to_copy = end_off - start_off;
2246 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2247 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2248 continue;
2249 }
2250
2251 /* Copy data from adapter to driver buffers */
2252 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2253 bytes_to_copy);
2254
2255 ioa_dump->hdr.len += bytes_copied;
2256
2257 if (bytes_copied != bytes_to_copy) {
2258 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2259 break;
2260 }
2261 }
2262 }
2263 }
2264
2265 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2266
2267 /* Update dump_header */
2268 driver_dump->hdr.len += ioa_dump->hdr.len;
2269 wmb();
2270 ioa_cfg->sdt_state = DUMP_OBTAINED;
2271 LEAVE;
2272}
2273
2274#else
2275#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2276#endif
2277
2278/**
2279 * ipr_release_dump - Free adapter dump memory
2280 * @kref: kref struct
2281 *
2282 * Return value:
2283 * nothing
2284 **/
2285static void ipr_release_dump(struct kref *kref)
2286{
2287 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2288 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2289 unsigned long lock_flags = 0;
2290 int i;
2291
2292 ENTER;
2293 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2294 ioa_cfg->dump = NULL;
2295 ioa_cfg->sdt_state = INACTIVE;
2296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2297
2298 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2299 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2300
2301 kfree(dump);
2302 LEAVE;
2303}
2304
2305/**
2306 * ipr_worker_thread - Worker thread
2307 * @work: work struct embedded in the ioa config struct
2308 *
2309 * Called at task level from a work thread. This function takes care
2310 * of adding and removing devices from the mid-layer as configuration
2311 * changes are detected by the adapter.
2312 *
2313 * Return value:
2314 * nothing
2315 **/
2316static void ipr_worker_thread(struct work_struct *work)
2317{
2318 unsigned long lock_flags;
2319 struct ipr_resource_entry *res;
2320 struct scsi_device *sdev;
2321 struct ipr_dump *dump;
2322 struct ipr_ioa_cfg *ioa_cfg =
2323 container_of(work, struct ipr_ioa_cfg, work_q);
2324 u8 bus, target, lun;
2325 int did_work;
2326
2327 ENTER;
2328 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2329
2330 if (ioa_cfg->sdt_state == GET_DUMP) {
2331 dump = ioa_cfg->dump;
2332 if (!dump) {
2333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2334 return;
2335 }
2336 kref_get(&dump->kref);
2337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2338 ipr_get_ioa_dump(ioa_cfg, dump);
2339 kref_put(&dump->kref, ipr_release_dump);
2340
2341 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2342 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2343 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2345 return;
2346 }
2347
2348restart:
2349 do {
2350 did_work = 0;
2351 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2353 return;
2354 }
2355
2356 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2357 if (res->del_from_ml && res->sdev) {
2358 did_work = 1;
2359 sdev = res->sdev;
2360 if (!scsi_device_get(sdev)) {
2361 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2362 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2363 scsi_remove_device(sdev);
2364 scsi_device_put(sdev);
2365 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2366 }
2367 break;
2368 }
2369 }
2370 } while(did_work);
2371
2372 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2373 if (res->add_to_ml) {
2374 bus = res->cfgte.res_addr.bus;
2375 target = res->cfgte.res_addr.target;
2376 lun = res->cfgte.res_addr.lun;
2377 res->add_to_ml = 0;
2378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2379 scsi_add_device(ioa_cfg->host, bus, target, lun);
2380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2381 goto restart;
2382 }
2383 }
2384
2385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2386 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2387 LEAVE;
2388}
2389
2390#ifdef CONFIG_SCSI_IPR_TRACE
2391/**
2392 * ipr_read_trace - Dump the adapter trace
2393 * @kobj: kobject struct
2394 * @buf: buffer
2395 * @off: offset
2396 * @count: buffer size
2397 *
2398 * Return value:
2399 * number of bytes printed to buffer
2400 **/
2401static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2402 loff_t off, size_t count)
2403{
2404 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2405 struct Scsi_Host *shost = class_to_shost(cdev);
2406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2407 unsigned long lock_flags = 0;
2408 int size = IPR_TRACE_SIZE;
2409 char *src = (char *)ioa_cfg->trace;
2410
2411 if (off > size)
2412 return 0;
2413 if (off + count > size) {
2414 size -= off;
2415 count = size;
2416 }
2417
2418 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2419 memcpy(buf, &src[off], count);
2420 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2421 return count;
2422}
2423
2424static struct bin_attribute ipr_trace_attr = {
2425 .attr = {
2426 .name = "trace",
2427 .mode = S_IRUGO,
2428 },
2429 .size = 0,
2430 .read = ipr_read_trace,
2431};
2432#endif
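/*
 * Usage sketch (hypothetical host number; path assumes the usual sysfs
 * location of a scsi_host class device binary attribute):
 *
 *	# cat /sys/class/scsi_host/host0/trace > ipr_trace.bin
 *
 * reads up to IPR_TRACE_SIZE bytes of the adapter trace buffer.
 */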
2433
2434static const struct {
2435 enum ipr_cache_state state;
2436 char *name;
2437} cache_state [] = {
2438 { CACHE_NONE, "none" },
2439 { CACHE_DISABLED, "disabled" },
2440 { CACHE_ENABLED, "enabled" }
2441};
2442
2443/**
2444 * ipr_show_write_caching - Show the write caching attribute
2445 * @class_dev: class device struct
2446 * @buf: buffer
2447 *
2448 * Return value:
2449 * number of bytes printed to buffer
2450 **/
2451static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2452{
2453 struct Scsi_Host *shost = class_to_shost(class_dev);
2454 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2455 unsigned long lock_flags = 0;
2456 int i, len = 0;
2457
2458 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2459 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2460 if (cache_state[i].state == ioa_cfg->cache_state) {
2461 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2462 break;
2463 }
2464 }
2465 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2466 return len;
2467}
2468
2469
2470/**
2471 * ipr_store_write_caching - Enable/disable adapter write cache
2472 * @class_dev: class_device struct
2473 * @buf: buffer
2474 * @count: buffer size
2475 *
2476 * This function will enable/disable adapter write cache.
2477 *
2478 * Return value:
2479 * count on success / other on failure
2480 **/
2481static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2482 const char *buf, size_t count)
2483{
2484 struct Scsi_Host *shost = class_to_shost(class_dev);
2485 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2486 unsigned long lock_flags = 0;
2487 enum ipr_cache_state new_state = CACHE_INVALID;
2488 int i;
2489
2490 if (!capable(CAP_SYS_ADMIN))
2491 return -EACCES;
2492 if (ioa_cfg->cache_state == CACHE_NONE)
2493 return -EINVAL;
2494
2495 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2496 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2497 new_state = cache_state[i].state;
2498 break;
2499 }
2500 }
2501
2502 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2503 return -EINVAL;
2504
2505 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2506 if (ioa_cfg->cache_state == new_state) {
2507 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2508 return count;
2509 }
2510
2511 ioa_cfg->cache_state = new_state;
2512 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2513 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2514 if (!ioa_cfg->in_reset_reload)
2515 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2517 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2518
2519 return count;
2520}
2521
2522static struct class_device_attribute ipr_ioa_cache_attr = {
2523 .attr = {
2524 .name = "write_cache",
2525 .mode = S_IRUGO | S_IWUSR,
2526 },
2527 .show = ipr_show_write_caching,
2528 .store = ipr_store_write_caching
2529};
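/*
 * Usage sketch (hypothetical host number, same sysfs assumption as the
 * trace attribute above):
 *
 *	# cat /sys/class/scsi_host/host0/write_cache
 *	enabled
 *	# echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * The store path triggers a normal-shutdown adapter reset and only
 * returns once the reset/reload has completed.
 */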
2530
2531/**
2532 * ipr_show_fw_version - Show the firmware version
2533 * @class_dev: class device struct
2534 * @buf: buffer
2535 *
2536 * Return value:
2537 * number of bytes printed to buffer
2538 **/
2539static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2540{
2541 struct Scsi_Host *shost = class_to_shost(class_dev);
2542 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2543 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2544 unsigned long lock_flags = 0;
2545 int len;
2546
2547 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2548 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2549 ucode_vpd->major_release, ucode_vpd->card_type,
2550 ucode_vpd->minor_release[0],
2551 ucode_vpd->minor_release[1]);
2552 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2553 return len;
2554}
2555
2556static struct class_device_attribute ipr_fw_version_attr = {
2557 .attr = {
2558 .name = "fw_version",
2559 .mode = S_IRUGO,
2560 },
2561 .show = ipr_show_fw_version,
2562};
2563
2564/**
2565 * ipr_show_log_level - Show the adapter's error logging level
2566 * @class_dev: class device struct
2567 * @buf: buffer
2568 *
2569 * Return value:
2570 * number of bytes printed to buffer
2571 **/
2572static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2573{
2574 struct Scsi_Host *shost = class_to_shost(class_dev);
2575 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2576 unsigned long lock_flags = 0;
2577 int len;
2578
2579 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2580 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2582 return len;
2583}
2584
2585/**
2586 * ipr_store_log_level - Change the adapter's error logging level
2587 * @class_dev: class device struct
2588 * @buf: buffer
2589 *
2590 * Return value:
2591 * number of bytes printed to buffer
2592 **/
2593static ssize_t ipr_store_log_level(struct class_device *class_dev,
2594 const char *buf, size_t count)
2595{
2596 struct Scsi_Host *shost = class_to_shost(class_dev);
2597 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2598 unsigned long lock_flags = 0;
2599
2600 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2601 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2603 return strlen(buf);
2604}
2605
2606static struct class_device_attribute ipr_log_level_attr = {
2607 .attr = {
2608 .name = "log_level",
2609 .mode = S_IRUGO | S_IWUSR,
2610 },
2611 .show = ipr_show_log_level,
2612 .store = ipr_store_log_level
2613};
2614
2615/**
2616 * ipr_store_diagnostics - IOA Diagnostics interface
2617 * @class_dev: class_device struct
2618 * @buf: buffer
2619 * @count: buffer size
2620 *
2621 * This function will reset the adapter and wait a reasonable
2622 * amount of time for any errors that the adapter might log.
2623 *
2624 * Return value:
2625 * count on success / other on failure
2626 **/
2627static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2628 const char *buf, size_t count)
2629{
2630 struct Scsi_Host *shost = class_to_shost(class_dev);
2631 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2632 unsigned long lock_flags = 0;
2633 int rc = count;
2634
2635 if (!capable(CAP_SYS_ADMIN))
2636 return -EACCES;
2637
2638 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2639 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2640 ioa_cfg->errors_logged = 0;
2641 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2642
2643 if (ioa_cfg->in_reset_reload) {
2644 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2645 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2646
2647 /* Wait for a second for any errors to be logged */
2648 msleep(1000);
2649 } else {
2650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2651 return -EIO;
2652 }
2653
2654 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2655 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2656 rc = -EIO;
2657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2658
2659 return rc;
2660}
2661
2662static struct class_device_attribute ipr_diagnostics_attr = {
2663 .attr = {
2664 .name = "run_diagnostics",
2665 .mode = S_IWUSR,
2666 },
2667 .store = ipr_store_diagnostics
2668};
2669
2670/**
2671 * ipr_show_adapter_state - Show the adapter's state
2672 * @class_dev: class device struct
2673 * @buf: buffer
2674 *
2675 * Return value:
2676 * number of bytes printed to buffer
2677 **/
2678static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2679{
2680 struct Scsi_Host *shost = class_to_shost(class_dev);
2681 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2682 unsigned long lock_flags = 0;
2683 int len;
2684
2685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2686 if (ioa_cfg->ioa_is_dead)
2687 len = snprintf(buf, PAGE_SIZE, "offline\n");
2688 else
2689 len = snprintf(buf, PAGE_SIZE, "online\n");
2690 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2691 return len;
2692}
2693
2694/**
2695 * ipr_store_adapter_state - Change adapter state
2696 * @class_dev: class_device struct
2697 * @buf: buffer
2698 * @count: buffer size
2699 *
2700 * This function will change the adapter's state.
2701 *
2702 * Return value:
2703 * count on success / other on failure
2704 **/
2705static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2706 const char *buf, size_t count)
2707{
2708 struct Scsi_Host *shost = class_to_shost(class_dev);
2709 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2710 unsigned long lock_flags;
2711 int result = count;
2712
2713 if (!capable(CAP_SYS_ADMIN))
2714 return -EACCES;
2715
2716 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2717 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2718 ioa_cfg->ioa_is_dead = 0;
2719 ioa_cfg->reset_retries = 0;
2720 ioa_cfg->in_ioa_bringdown = 0;
2721 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2722 }
2723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2724 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2725
2726 return result;
2727}
2728
2729static struct class_device_attribute ipr_ioa_state_attr = {
2730 .attr = {
2731 .name = "state",
2732 .mode = S_IRUGO | S_IWUSR,
2733 },
2734 .show = ipr_show_adapter_state,
2735 .store = ipr_store_adapter_state
2736};
2737
2738/**
2739 * ipr_store_reset_adapter - Reset the adapter
2740 * @class_dev: class_device struct
2741 * @buf: buffer
2742 * @count: buffer size
2743 *
2744 * This function will reset the adapter.
2745 *
2746 * Return value:
2747 * count on success / other on failure
2748 **/
2749static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2750 const char *buf, size_t count)
2751{
2752 struct Scsi_Host *shost = class_to_shost(class_dev);
2753 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2754 unsigned long lock_flags;
2755 int result = count;
2756
2757 if (!capable(CAP_SYS_ADMIN))
2758 return -EACCES;
2759
2760 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2761 if (!ioa_cfg->in_reset_reload)
2762 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2764 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2765
2766 return result;
2767}
2768
2769static struct class_device_attribute ipr_ioa_reset_attr = {
2770 .attr = {
2771 .name = "reset_host",
2772 .mode = S_IWUSR,
2773 },
2774 .store = ipr_store_reset_adapter
2775};
2776
2777/**
2778 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2779 * @buf_len: buffer length
2780 *
2781 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2782 * list to use for microcode download
2783 *
2784 * Return value:
2785 * pointer to sglist / NULL on failure
2786 **/
2787static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2788{
2789 int sg_size, order, bsize_elem, num_elem, i, j;
2790 struct ipr_sglist *sglist;
2791 struct scatterlist *scatterlist;
2792 struct page *page;
2793
2794 /* Get the minimum size per scatter/gather element */
2795 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2796
2797 /* Get the actual size per element */
2798 order = get_order(sg_size);
2799
2800 /* Determine the actual number of bytes per element */
2801 bsize_elem = PAGE_SIZE * (1 << order);
2802
2803 /* Determine the actual number of sg entries needed */
2804 if (buf_len % bsize_elem)
2805 num_elem = (buf_len / bsize_elem) + 1;
2806 else
2807 num_elem = buf_len / bsize_elem;
2808
2809 /* Allocate a scatter/gather list for the DMA */
2810 sglist = kzalloc(sizeof(struct ipr_sglist) +
2811 (sizeof(struct scatterlist) * (num_elem - 1)),
2812 GFP_KERNEL);
2813
2814 if (sglist == NULL) {
2815 ipr_trace;
2816 return NULL;
2817 }
2818
2819 scatterlist = sglist->scatterlist;
2820
2821 sglist->order = order;
2822 sglist->num_sg = num_elem;
2823
2824 /* Allocate a bunch of sg elements */
2825 for (i = 0; i < num_elem; i++) {
2826 page = alloc_pages(GFP_KERNEL, order);
2827 if (!page) {
2828 ipr_trace;
2829
2830 /* Free up what we already allocated */
2831 for (j = i - 1; j >= 0; j--)
2832 __free_pages(scatterlist[j].page, order);
2833 kfree(sglist);
2834 return NULL;
2835 }
2836
2837 scatterlist[i].page = page;
2838 }
2839
2840 return sglist;
2841}
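/*
 * Worked sizing example (illustrative; assumes PAGE_SIZE == 4096 and
 * IPR_MAX_SGLIST == 64): for buf_len == 1 MB, sg_size = 1048576 / 63
 * = 16644, get_order(16644) == 3, so bsize_elem = 4096 << 3 = 32768
 * bytes per element and num_elem = 1048576 / 32768 = 32 order-3 page
 * allocations.
 */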
2842
2843/**
2844 * ipr_free_ucode_buffer - Frees a microcode download buffer
2845 * @sglist: scatter/gather list pointer
2846 *
2847 * Free a DMA'able ucode download buffer previously allocated with
2848 * ipr_alloc_ucode_buffer
2849 *
2850 * Return value:
2851 * nothing
2852 **/
2853static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2854{
2855 int i;
2856
2857 for (i = 0; i < sglist->num_sg; i++)
2858 __free_pages(sglist->scatterlist[i].page, sglist->order);
2859
2860 kfree(sglist);
2861}
2862
2863/**
2864 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2865 * @sglist: scatter/gather list pointer
2866 * @buffer: buffer pointer
2867 * @len: buffer length
2868 *
2869 * Copy a microcode image from a user buffer into a buffer allocated by
2870 * ipr_alloc_ucode_buffer
2871 *
2872 * Return value:
2873 * 0 on success / other on failure
2874 **/
2875static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2876 u8 *buffer, u32 len)
2877{
2878 int bsize_elem, i, result = 0;
2879 struct scatterlist *scatterlist;
2880 void *kaddr;
2881
2882 /* Determine the actual number of bytes per element */
2883 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2884
2885 scatterlist = sglist->scatterlist;
2886
2887 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2888 kaddr = kmap(scatterlist[i].page);
2889 memcpy(kaddr, buffer, bsize_elem);
2890 kunmap(scatterlist[i].page);
2891
2892 scatterlist[i].length = bsize_elem;
2893
2894 if (result != 0) {
2895 ipr_trace;
2896 return result;
2897 }
2898 }
2899
2900 if (len % bsize_elem) {
2901 kaddr = kmap(scatterlist[i].page);
2902 memcpy(kaddr, buffer, len % bsize_elem);
2903 kunmap(scatterlist[i].page);
2904
2905 scatterlist[i].length = len % bsize_elem;
2906 }
2907
2908 sglist->buffer_len = len;
2909 return result;
2910}
2911
2912/**
2913 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2914 * @ipr_cmd: ipr command struct
2915 * @sglist: scatter/gather list
2916 *
2917 * Builds a microcode download IOA data list (IOADL).
2918 *
2919 **/
2920static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2921 struct ipr_sglist *sglist)
2922{
2923 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2924 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2925 struct scatterlist *scatterlist = sglist->scatterlist;
2926 int i;
2927
2928 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2929 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2930 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2931 ioarcb->write_ioadl_len =
2932 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2933
2934 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2935 ioadl[i].flags_and_data_len =
2936 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2937 ioadl[i].address =
2938 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2939 }
2940
2941 ioadl[i-1].flags_and_data_len |=
2942 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2943}
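/*
 * Illustrative shape of the list built above (lengths hypothetical):
 *
 *	ioadl[0].flags_and_data_len = WRITE | 32768;	address = dma0
 *	...
 *	ioadl[n].flags_and_data_len = WRITE | LAST | len_n
 *
 * Each descriptor carries a big-endian flags/length word plus a 32-bit
 * DMA address; IPR_IOADL_FLAGS_LAST on the final entry tells the IOA
 * where the list ends.
 */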
2944
2945/**
2946 * ipr_update_ioa_ucode - Update IOA's microcode
2947 * @ioa_cfg: ioa config struct
2948 * @sglist: scatter/gather list
2949 *
2950 * Initiate an adapter reset to update the IOA's microcode
2951 *
2952 * Return value:
2953 * 0 on success / -EIO on failure
2954 **/
2955static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2956 struct ipr_sglist *sglist)
2957{
2958 unsigned long lock_flags;
2959
2960 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2961
2962 if (ioa_cfg->ucode_sglist) {
2963 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964 dev_err(&ioa_cfg->pdev->dev,
2965 "Microcode download already in progress\n");
2966 return -EIO;
2967 }
2968
2969 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2970 sglist->num_sg, DMA_TO_DEVICE);
2971
2972 if (!sglist->num_dma_sg) {
2973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2974 dev_err(&ioa_cfg->pdev->dev,
2975 "Failed to map microcode download buffer!\n");
2976 return -EIO;
2977 }
2978
2979 ioa_cfg->ucode_sglist = sglist;
2980 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2981 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2982 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2983
2984 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2985 ioa_cfg->ucode_sglist = NULL;
2986 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2987 return 0;
2988}
2989
2990/**
2991 * ipr_store_update_fw - Update the firmware on the adapter
2992 * @class_dev: class_device struct
2993 * @buf: buffer
2994 * @count: buffer size
2995 *
2996 * This function will update the firmware on the adapter.
2997 *
2998 * Return value:
2999 * count on success / other on failure
3000 **/
3001static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3002 const char *buf, size_t count)
3003{
3004 struct Scsi_Host *shost = class_to_shost(class_dev);
3005 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3006 struct ipr_ucode_image_header *image_hdr;
3007 const struct firmware *fw_entry;
3008 struct ipr_sglist *sglist;
3009 char fname[100];
3010 char *src;
3011 int len, result, dnld_size;
3012
3013 if (!capable(CAP_SYS_ADMIN))
3014 return -EACCES;
3015
3016 len = snprintf(fname, 99, "%s", buf);
3017 fname[len-1] = '\0';
3018
3019 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3020 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3021 return -EIO;
3022 }
3023
3024 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3025
3026 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3027 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3028 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3029 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3030 release_firmware(fw_entry);
3031 return -EINVAL;
3032 }
3033
3034 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3035 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3036 sglist = ipr_alloc_ucode_buffer(dnld_size);
3037
3038 if (!sglist) {
3039 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3040 release_firmware(fw_entry);
3041 return -ENOMEM;
3042 }
3043
3044 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3045
3046 if (result) {
3047 dev_err(&ioa_cfg->pdev->dev,
3048 "Microcode buffer copy to DMA buffer failed\n");
3049 goto out;
3050 }
3051
3052 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3053
3054 if (!result)
3055 result = count;
3056out:
3057 ipr_free_ucode_buffer(sglist);
3058 release_firmware(fw_entry);
3059 return result;
3060}
3061
3062static struct class_device_attribute ipr_update_fw_attr = {
3063 .attr = {
3064 .name = "update_fw",
3065 .mode = S_IWUSR,
3066 },
3067 .store = ipr_store_update_fw
3068};
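/*
 * Usage sketch (hypothetical image name and host number): place the
 * microcode image where request_firmware() can find it (typically
 * /lib/firmware) and write the file name to the attribute:
 *
 *	# cp ibm-ucode.img /lib/firmware/
 *	# echo ibm-ucode.img > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks across the adapter reset that activates the image.
 */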
3069
3070static struct class_device_attribute *ipr_ioa_attrs[] = {
3071 &ipr_fw_version_attr,
3072 &ipr_log_level_attr,
3073 &ipr_diagnostics_attr,
3074 &ipr_ioa_state_attr,
3075 &ipr_ioa_reset_attr,
3076 &ipr_update_fw_attr,
3077 &ipr_ioa_cache_attr,
3078 NULL,
3079};
3080
3081#ifdef CONFIG_SCSI_IPR_DUMP
3082/**
3083 * ipr_read_dump - Dump the adapter
3084 * @kobj: kobject struct
3085 * @buf: buffer
3086 * @off: offset
3087 * @count: buffer size
3088 *
3089 * Return value:
3090 * number of bytes printed to buffer
3091 **/
3092static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3093 loff_t off, size_t count)
3094{
3095 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3096 struct Scsi_Host *shost = class_to_shost(cdev);
3097 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3098 struct ipr_dump *dump;
3099 unsigned long lock_flags = 0;
3100 char *src;
3101 int len;
3102 size_t rc = count;
3103
3104 if (!capable(CAP_SYS_ADMIN))
3105 return -EACCES;
3106
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3108 dump = ioa_cfg->dump;
3109
3110 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3112 return 0;
3113 }
3114 kref_get(&dump->kref);
3115 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3116
3117 if (off > dump->driver_dump.hdr.len) {
3118 kref_put(&dump->kref, ipr_release_dump);
3119 return 0;
3120 }
3121
3122 if (off + count > dump->driver_dump.hdr.len) {
3123 count = dump->driver_dump.hdr.len - off;
3124 rc = count;
3125 }
3126
3127 if (count && off < sizeof(dump->driver_dump)) {
3128 if (off + count > sizeof(dump->driver_dump))
3129 len = sizeof(dump->driver_dump) - off;
3130 else
3131 len = count;
3132 src = (u8 *)&dump->driver_dump + off;
3133 memcpy(buf, src, len);
3134 buf += len;
3135 off += len;
3136 count -= len;
3137 }
3138
3139 off -= sizeof(dump->driver_dump);
3140
3141 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3142 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3143 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3144 else
3145 len = count;
3146 src = (u8 *)&dump->ioa_dump + off;
3147 memcpy(buf, src, len);
3148 buf += len;
3149 off += len;
3150 count -= len;
3151 }
3152
3153 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3154
3155 while (count) {
3156 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3157 len = PAGE_ALIGN(off) - off;
3158 else
3159 len = count;
3160 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3161 src += off & ~PAGE_MASK;
3162 memcpy(buf, src, len);
3163 buf += len;
3164 off += len;
3165 count -= len;
3166 }
3167
3168 kref_put(&dump->kref, ipr_release_dump);
3169 return rc;
3170}
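/*
 * Illustrative walk-through of the offset handling above: a read at
 * off == sizeof(dump->driver_dump) + 10 copies nothing from the driver
 * dump header, starts at byte 10 of the ioa_dump header, and once off
 * moves past offsetof(struct ipr_ioa_dump, ioa_data) the remaining
 * bytes are served page by page from ioa_dump.ioa_data[], never
 * crossing a page boundary in a single memcpy.
 */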
3171
3172/**
3173 * ipr_alloc_dump - Prepare for adapter dump
3174 * @ioa_cfg: ioa config struct
3175 *
3176 * Return value:
3177 * 0 on success / other on failure
3178 **/
3179static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3180{
3181 struct ipr_dump *dump;
3182 unsigned long lock_flags = 0;
3183
3184 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3185
3186 if (!dump) {
3187 ipr_err("Dump memory allocation failed\n");
3188 return -ENOMEM;
3189 }
3190
3191 kref_init(&dump->kref);
3192 dump->ioa_cfg = ioa_cfg;
3193
3194 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3195
3196 if (INACTIVE != ioa_cfg->sdt_state) {
3197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3198 kfree(dump);
3199 return 0;
3200 }
3201
3202 ioa_cfg->dump = dump;
3203 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3204 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3205 ioa_cfg->dump_taken = 1;
3206 schedule_work(&ioa_cfg->work_q);
3207 }
3208 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3209
3210 return 0;
3211}
3212
3213/**
3214 * ipr_free_dump - Free adapter dump memory
3215 * @ioa_cfg: ioa config struct
3216 *
3217 * Return value:
3218 * 0 on success / other on failure
3219 **/
3220static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3221{
3222 struct ipr_dump *dump;
3223 unsigned long lock_flags = 0;
3224
3225 ENTER;
3226
3227 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3228 dump = ioa_cfg->dump;
3229 if (!dump) {
3230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231 return 0;
3232 }
3233
3234 ioa_cfg->dump = NULL;
3235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3236
3237 kref_put(&dump->kref, ipr_release_dump);
3238
3239 LEAVE;
3240 return 0;
3241}
3242
3243/**
3244 * ipr_write_dump - Setup dump state of adapter
3245 * @kobj: kobject struct
3246 * @buf: buffer
3247 * @off: offset
3248 * @count: buffer size
3249 *
3250 * Return value:
3251 * number of bytes written / error code on failure
3252 **/
3253static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3254 loff_t off, size_t count)
3255{
3256 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3257 struct Scsi_Host *shost = class_to_shost(cdev);
3258 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3259 int rc;
3260
3261 if (!capable(CAP_SYS_ADMIN))
3262 return -EACCES;
3263
3264 if (buf[0] == '1')
3265 rc = ipr_alloc_dump(ioa_cfg);
3266 else if (buf[0] == '0')
3267 rc = ipr_free_dump(ioa_cfg);
3268 else
3269 return -EINVAL;
3270
3271 if (rc)
3272 return rc;
3273 else
3274 return count;
3275}
3276
3277static struct bin_attribute ipr_dump_attr = {
3278 .attr = {
3279 .name = "dump",
3280 .mode = S_IRUSR | S_IWUSR,
3281 },
3282 .size = 0,
3283 .read = ipr_read_dump,
3284 .write = ipr_write_dump
3285};
3286#else
3287static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3288#endif
3289
3290/**
3291 * ipr_change_queue_depth - Change the device's queue depth
3292 * @sdev: scsi device struct
3293 * @qdepth: depth to set
3294 *
3295 * Return value:
3296 * actual depth set
3297 **/
3298static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3299{
3300 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3301 struct ipr_resource_entry *res;
3302 unsigned long lock_flags = 0;
3303
3304 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3305 res = (struct ipr_resource_entry *)sdev->hostdata;
3306
3307 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3308 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3309 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3310
3311 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3312 return sdev->queue_depth;
3313}
3314
3315/**
3316 * ipr_change_queue_type - Change the device's queue type
3317 * @sdev: scsi device struct
3318 * @tag_type: type of tags to use
3319 *
3320 * Return value:
3321 * actual queue type set
3322 **/
3323static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3324{
3325 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3326 struct ipr_resource_entry *res;
3327 unsigned long lock_flags = 0;
3328
3329 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3330 res = (struct ipr_resource_entry *)sdev->hostdata;
3331
3332 if (res) {
3333 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3334 /*
3335 * We don't bother quiescing the device here since the
3336 * adapter firmware does it for us.
3337 */
3338 scsi_set_tag_type(sdev, tag_type);
3339
3340 if (tag_type)
3341 scsi_activate_tcq(sdev, sdev->queue_depth);
3342 else
3343 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3344 } else
3345 tag_type = 0;
3346 } else
3347 tag_type = 0;
3348
3349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3350 return tag_type;
3351}
3352
3353/**
3354 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3355 * @dev: device struct
3356 * @buf: buffer
3357 *
3358 * Return value:
3359 * number of bytes printed to buffer
3360 **/
3361static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3362{
3363 struct scsi_device *sdev = to_scsi_device(dev);
3364 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3365 struct ipr_resource_entry *res;
3366 unsigned long lock_flags = 0;
3367 ssize_t len = -ENXIO;
3368
3369 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3370 res = (struct ipr_resource_entry *)sdev->hostdata;
3371 if (res)
3372 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374 return len;
3375}
3376
3377static struct device_attribute ipr_adapter_handle_attr = {
3378 .attr = {
3379 .name = "adapter_handle",
3380 .mode = S_IRUSR,
3381 },
3382 .show = ipr_show_adapter_handle
3383};
3384
3385static struct device_attribute *ipr_dev_attrs[] = {
3386 &ipr_adapter_handle_attr,
3387 NULL,
3388};
3389
3390/**
3391 * ipr_biosparam - Return the HSC mapping
3392 * @sdev: scsi device struct
3393 * @block_device: block device pointer
3394 * @capacity: capacity of the device
3395 * @parm: Array containing returned HSC values.
3396 *
3397 * This function generates the HSC parms that fdisk uses.
3398 * We want to make sure we return something that places partitions
3399 * on 4k boundaries for best performance with the IOA.
3400 *
3401 * Return value:
3402 * 0 on success
3403 **/
3404static int ipr_biosparam(struct scsi_device *sdev,
3405 struct block_device *block_device,
3406 sector_t capacity, int *parm)
3407{
3408 int heads, sectors;
3409 sector_t cylinders;
3410
3411 heads = 128;
3412 sectors = 32;
3413
3414 cylinders = capacity;
3415 sector_div(cylinders, (128 * 32));
3416
3417 /* return result */
3418 parm[0] = heads;
3419 parm[1] = sectors;
3420 parm[2] = cylinders;
3421
3422 return 0;
3423}
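/*
 * Worked example (hypothetical capacity): with 128 heads and 32
 * sectors/track, one cylinder is 128 * 32 = 4096 blocks, i.e. 2MB of
 * 512-byte sectors, so cylinder-aligned partitions always begin on a
 * 4k boundary. A disk of 70000000 sectors (~36 GB) reports
 * cylinders = 70000000 / 4096 = 17089.
 */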
3424
3425/**
3426 * ipr_find_starget - Find target based on bus/target.
3427 * @starget: scsi target struct
3428 *
3429 * Return value:
3430 * resource entry pointer if found / NULL if not found
3431 **/
3432static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3433{
3434 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3436 struct ipr_resource_entry *res;
3437
3438 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3439 if ((res->cfgte.res_addr.bus == starget->channel) &&
3440 (res->cfgte.res_addr.target == starget->id) &&
3441 (res->cfgte.res_addr.lun == 0)) {
3442 return res;
3443 }
3444 }
3445
3446 return NULL;
3447}
3448
3449static struct ata_port_info sata_port_info;
3450
3451/**
3452 * ipr_target_alloc - Prepare for commands to a SCSI target
3453 * @starget: scsi target struct
3454 *
3455 * If the device is a SATA device, this function allocates an
3456 * ATA port with libata, else it does nothing.
3457 *
3458 * Return value:
3459 * 0 on success / non-0 on failure
3460 **/
3461static int ipr_target_alloc(struct scsi_target *starget)
3462{
3463 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3464 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3465 struct ipr_sata_port *sata_port;
3466 struct ata_port *ap;
3467 struct ipr_resource_entry *res;
3468 unsigned long lock_flags;
3469
3470 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3471 res = ipr_find_starget(starget);
3472 starget->hostdata = NULL;
3473
3474 if (res && ipr_is_gata(res)) {
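		/*
		 * Editor's note: the host lock is dropped here because
		 * kzalloc(GFP_KERNEL) and ata_sas_port_alloc() may sleep,
		 * which is not permitted while holding a spinlock.
		 */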
3475 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3476 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3477 if (!sata_port)
3478 return -ENOMEM;
3479
3480 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3481 if (ap) {
3482 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3483 sata_port->ioa_cfg = ioa_cfg;
3484 sata_port->ap = ap;
3485 sata_port->res = res;
3486
3487 res->sata_port = sata_port;
3488 ap->private_data = sata_port;
3489 starget->hostdata = sata_port;
3490 } else {
3491 kfree(sata_port);
3492 return -ENOMEM;
3493 }
3494 }
3495 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3496
3497 return 0;
3498}
3499
3500/**
3501 * ipr_target_destroy - Destroy a SCSI target
3502 * @starget: scsi target struct
3503 *
3504 * If the device was a SATA device, this function frees the libata
3505 * ATA port, else it does nothing.
3506 *
3507 **/
3508static void ipr_target_destroy(struct scsi_target *starget)
3509{
3510 struct ipr_sata_port *sata_port = starget->hostdata;
3511
3512 if (sata_port) {
3513 starget->hostdata = NULL;
3514 ata_sas_port_destroy(sata_port->ap);
3515 kfree(sata_port);
3516 }
3517}
3518
3519/**
3520 * ipr_find_sdev - Find device based on bus/target/lun.
3521 * @sdev: scsi device struct
3522 *
3523 * Return value:
3524 * resource entry pointer if found / NULL if not found
3525 **/
3526static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3527{
3528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3529 struct ipr_resource_entry *res;
3530
3531 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3532 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3533 (res->cfgte.res_addr.target == sdev->id) &&
3534 (res->cfgte.res_addr.lun == sdev->lun))
3535 return res;
3536 }
3537
3538 return NULL;
3539}
3540
3541/**
3542 * ipr_slave_destroy - Unconfigure a SCSI device
3543 * @sdev: scsi device struct
3544 *
3545 * Return value:
3546 * nothing
3547 **/
3548static void ipr_slave_destroy(struct scsi_device *sdev)
3549{
3550 struct ipr_resource_entry *res;
3551 struct ipr_ioa_cfg *ioa_cfg;
3552 unsigned long lock_flags = 0;
3553
3554 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3555
3556 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557 res = (struct ipr_resource_entry *) sdev->hostdata;
3558 if (res) {
3559 if (res->sata_port)
3560 ata_port_disable(res->sata_port->ap);
3561 sdev->hostdata = NULL;
3562 res->sdev = NULL;
3563 res->sata_port = NULL;
3564 }
3565 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3566}
3567
3568/**
3569 * ipr_slave_configure - Configure a SCSI device
3570 * @sdev: scsi device struct
3571 *
3572 * This function configures the specified scsi device.
3573 *
3574 * Return value:
3575 * 0 on success
3576 **/
3577static int ipr_slave_configure(struct scsi_device *sdev)
3578{
3579 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3580 struct ipr_resource_entry *res;
3581 unsigned long lock_flags = 0;
3582
3583 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3584 res = sdev->hostdata;
3585 if (res) {
3586 if (ipr_is_af_dasd_device(res))
3587 sdev->type = TYPE_RAID;
3588 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3589 sdev->scsi_level = 4;
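			/*
			 * Editor's note: setting no_uld_attach keeps
			 * upper-level drivers such as sd from binding to
			 * this device; AF DASD and IOA resources are
			 * driven through the adapter itself rather than
			 * exposed as plain disks.
			 */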
3590 sdev->no_uld_attach = 1;
3591 }
3592 if (ipr_is_vset_device(res)) {
3593 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3594 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3595 }
3596 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3597 sdev->allow_restart = 1;
3598 if (ipr_is_gata(res) && res->sata_port) {
3599 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3600 ata_sas_slave_configure(sdev, res->sata_port->ap);
3601 } else {
3602 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3603 }
3604 }
3605 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3606 return 0;
3607}
3608
3609/**
3610 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3611 * @sdev: scsi device struct
3612 *
3613 * This function initializes an ATA port so that future commands
3614 * sent through queuecommand will work.
3615 *
3616 * Return value:
3617 * 0 on success
3618 **/
3619static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3620{
3621 struct ipr_sata_port *sata_port = NULL;
3622 int rc = -ENXIO;
3623
3624 ENTER;
3625 if (sdev->sdev_target)
3626 sata_port = sdev->sdev_target->hostdata;
3627 if (sata_port)
3628 rc = ata_sas_port_init(sata_port->ap);
3629 if (rc)
3630 ipr_slave_destroy(sdev);
3631
3632 LEAVE;
3633 return rc;
3634}
3635
3636/**
3637 * ipr_slave_alloc - Prepare for commands to a device.
3638 * @sdev: scsi device struct
3639 *
3640 * This function saves a pointer to the resource entry
3641 * in the scsi device struct if the device exists. We
3642 * can then use this pointer in ipr_queuecommand when
3643 * handling new commands.
3644 *
3645 * Return value:
3646 * 0 on success / -ENXIO if device does not exist
3647 **/
3648static int ipr_slave_alloc(struct scsi_device *sdev)
3649{
3650 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3651 struct ipr_resource_entry *res;
3652 unsigned long lock_flags;
3653 int rc = -ENXIO;
3654
3655 sdev->hostdata = NULL;
3656
3657 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3658
3659 res = ipr_find_sdev(sdev);
3660 if (res) {
3661 res->sdev = sdev;
3662 res->add_to_ml = 0;
3663 res->in_erp = 0;
3664 sdev->hostdata = res;
3665 if (!ipr_is_naca_model(res))
3666 res->needs_sync_complete = 1;
3667 rc = 0;
3668 if (ipr_is_gata(res)) {
3669 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3670 return ipr_ata_slave_alloc(sdev);
3671 }
3672 }
3673
3674 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3675
3676 return rc;
3677}
3678
3679/**
3680 * __ipr_eh_host_reset - Reset the host adapter
3681 * @scsi_cmd: scsi command struct
3682 *
3683 * Return value:
3684 * SUCCESS / FAILED
3685 **/
3686static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3687{
3688 struct ipr_ioa_cfg *ioa_cfg;
3689 int rc;
3690
3691 ENTER;
3692 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3693
3694 dev_err(&ioa_cfg->pdev->dev,
3695 "Adapter being reset as a result of error recovery.\n");
3696
3697 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3698 ioa_cfg->sdt_state = GET_DUMP;
3699
3700 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3701
3702 LEAVE;
3703 return rc;
3704}
3705
3706static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3707{
3708 int rc;
3709
3710 spin_lock_irq(cmd->device->host->host_lock);
3711 rc = __ipr_eh_host_reset(cmd);
3712 spin_unlock_irq(cmd->device->host->host_lock);
3713
3714 return rc;
3715}
3716
3717/**
3718 * ipr_device_reset - Reset the device
3719 * @ioa_cfg: ioa config struct
3720 * @res: resource entry struct
3721 *
3722 * This function issues a device reset to the affected device.
3723 * If the device is a SCSI device, a LUN reset will be sent
3724 * to the device first. If that does not work, a target reset
3725 * will be sent. If the device is a SATA device, a PHY reset will
3726 * be sent.
3727 *
3728 * Return value:
3729 * 0 on success / non-zero on failure
3730 **/
3731static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3732 struct ipr_resource_entry *res)
3733{
3734 struct ipr_cmnd *ipr_cmd;
3735 struct ipr_ioarcb *ioarcb;
3736 struct ipr_cmd_pkt *cmd_pkt;
3737 struct ipr_ioarcb_ata_regs *regs;
3738 u32 ioasc;
3739
3740 ENTER;
3741 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3742 ioarcb = &ipr_cmd->ioarcb;
3743 cmd_pkt = &ioarcb->cmd_pkt;
3744 regs = &ioarcb->add_data.u.regs;
3745
3746 ioarcb->res_handle = res->cfgte.res_handle;
3747 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3748 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3749 if (ipr_is_gata(res)) {
3750 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3751 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3752 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3753 }
3754
3755 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3756 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3757 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3758 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3759 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3760 sizeof(struct ipr_ioasa_gata));
3761
3762 LEAVE;
3763 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3764}
3765
3766/**
3767 * ipr_sata_reset - Reset the SATA port
3768 * @ap: SATA port to reset
3769 * @classes: class of the attached device
3770 *
3771 * This function issues a SATA phy reset to the affected ATA port.
3772 *
3773 * Return value:
3774 * 0 on success / non-zero on failure
3775 **/
3776static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
3777 unsigned long deadline)
3778{
3779 struct ipr_sata_port *sata_port = ap->private_data;
3780 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3781 struct ipr_resource_entry *res;
3782 unsigned long lock_flags = 0;
3783 int rc = -ENXIO;
3784
3785 ENTER;
3786 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3787 while(ioa_cfg->in_reset_reload) {
3788 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3789 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3790 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3791 }
3792
3793 res = sata_port->res;
3794 if (res) {
3795 rc = ipr_device_reset(ioa_cfg, res);
3796 switch(res->cfgte.proto) {
3797 case IPR_PROTO_SATA:
3798 case IPR_PROTO_SAS_STP:
3799 *classes = ATA_DEV_ATA;
3800 break;
3801 case IPR_PROTO_SATA_ATAPI:
3802 case IPR_PROTO_SAS_STP_ATAPI:
3803 *classes = ATA_DEV_ATAPI;
3804 break;
3805 default:
3806 *classes = ATA_DEV_UNKNOWN;
3807 break;
3808 }
3809 }
3810
3811 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3812 LEAVE;
3813 return rc;
3814}
3815
3816/**
3817 * __ipr_eh_dev_reset - Reset the device
3818 * @scsi_cmd: scsi command struct
3819 *
3820 * This function issues a device reset to the affected device.
3821 * A LUN reset will be sent to the device first. If that does
3822 * not work, a target reset will be sent.
3823 *
3824 * Return value:
3825 * SUCCESS / FAILED
3826 **/
3827static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3828{
3829 struct ipr_cmnd *ipr_cmd;
3830 struct ipr_ioa_cfg *ioa_cfg;
3831 struct ipr_resource_entry *res;
3832 struct ata_port *ap;
3833 int rc = 0;
3834
3835 ENTER;
3836 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3837 res = scsi_cmd->device->hostdata;
3838
3839 if (!res)
3840 return FAILED;
3841
3842 /*
3843 * If we are currently going through reset/reload, return failed. This will force the
3844 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3845 * reset to complete.
3846 */
3847 if (ioa_cfg->in_reset_reload)
3848 return FAILED;
3849 if (ioa_cfg->ioa_is_dead)
3850 return FAILED;
3851
3852 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3853 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3854 if (ipr_cmd->scsi_cmd)
3855 ipr_cmd->done = ipr_scsi_eh_done;
3856 if (ipr_cmd->qc)
3857 ipr_cmd->done = ipr_sata_eh_done;
3858 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3859 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3860 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3861 }
3862 }
3863 }
3864
3865 res->resetting_device = 1;
3866 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3867
3868 if (ipr_is_gata(res) && res->sata_port) {
3869 ap = res->sata_port->ap;
3870 spin_unlock_irq(scsi_cmd->device->host->host_lock);
3871 ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3872 spin_lock_irq(scsi_cmd->device->host->host_lock);
3873 } else
3874 rc = ipr_device_reset(ioa_cfg, res);
3875 res->resetting_device = 0;
3876
3877 LEAVE;
3878 return (rc ? FAILED : SUCCESS);
3879}
3880
3881static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3882{
3883 int rc;
3884
3885 spin_lock_irq(cmd->device->host->host_lock);
3886 rc = __ipr_eh_dev_reset(cmd);
3887 spin_unlock_irq(cmd->device->host->host_lock);
3888
3889 return rc;
3890}
3891
3892/**
3893 * ipr_bus_reset_done - Op done function for bus reset.
3894 * @ipr_cmd: ipr command struct
3895 *
3896 * This function is the op done function for a bus reset
3897 *
3898 * Return value:
3899 * none
3900 **/
3901static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3902{
3903 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3904 struct ipr_resource_entry *res;
3905
3906 ENTER;
3907 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3908 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3909 sizeof(res->cfgte.res_handle))) {
3910 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3911 break;
3912 }
3913 }
3914
3915 /*
3916 * If abort has not completed, indicate the reset has, else call the
3917 * abort's done function to wake the sleeping eh thread
3918 */
3919 if (ipr_cmd->sibling->sibling)
3920 ipr_cmd->sibling->sibling = NULL;
3921 else
3922 ipr_cmd->sibling->done(ipr_cmd->sibling);
3923
3924 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3925 LEAVE;
3926}
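/*
 * Editor's sketch of the handshake resolved above: the abort command and
 * the bus reset it spawned point at each other through ->sibling. The op
 * that finishes first clears its partner's back-pointer; the op that
 * finishes second finds its own pointer already cleared and calls done()
 * instead, so the sleeping eh thread is woken exactly once no matter
 * which completes first.
 */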
3927
3928/**
3929 * ipr_abort_timeout - An abort task has timed out
3930 * @ipr_cmd: ipr command struct
3931 *
3932 * This function handles when an abort task times out. If this
3933 * happens we issue a bus reset since we have resources tied
3934 * up that must be freed before returning to the midlayer.
3935 *
3936 * Return value:
3937 * none
3938 **/
3939static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3940{
3941 struct ipr_cmnd *reset_cmd;
3942 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3943 struct ipr_cmd_pkt *cmd_pkt;
3944 unsigned long lock_flags = 0;
3945
3946 ENTER;
3947 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3948 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950 return;
3951 }
3952
3953 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3954 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3955 ipr_cmd->sibling = reset_cmd;
3956 reset_cmd->sibling = ipr_cmd;
3957 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3958 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3959 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3960 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3961 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3962
3963 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965 LEAVE;
3966}
3967
3968/**
3969 * ipr_cancel_op - Cancel specified op
3970 * @scsi_cmd: scsi command struct
3971 *
3972 * This function cancels specified op.
3973 *
3974 * Return value:
3975 * SUCCESS / FAILED
3976 **/
3977static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3978{
3979 struct ipr_cmnd *ipr_cmd;
3980 struct ipr_ioa_cfg *ioa_cfg;
3981 struct ipr_resource_entry *res;
3982 struct ipr_cmd_pkt *cmd_pkt;
3983 u32 ioasc;
3984 int op_found = 0;
3985
3986 ENTER;
3987 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3988 res = scsi_cmd->device->hostdata;
3989
3990 /* If we are currently going through reset/reload, return failed.
3991 * This will force the mid-layer to call ipr_eh_host_reset,
3992 * which will then go to sleep and wait for the reset to complete.
3993 */
3994 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3995 return FAILED;
3996 if (!res || !ipr_is_gscsi(res))
3997 return FAILED;
3998
3999 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4000 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4001 ipr_cmd->done = ipr_scsi_eh_done;
4002 op_found = 1;
4003 break;
4004 }
4005 }
4006
4007 if (!op_found)
4008 return SUCCESS;
4009
4010 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4011 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4012 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4013 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4014 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4015 ipr_cmd->u.sdev = scsi_cmd->device;
4016
4017 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4018 scsi_cmd->cmnd[0]);
4019 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4020 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4021
4022 /*
4023 * If the abort task timed out and we sent a bus reset, we will get
4024 * one of the following responses to the abort
4025 */
4026 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4027 ioasc = 0;
4028 ipr_trace;
4029 }
4030
4031 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4032 if (!ipr_is_naca_model(res))
4033 res->needs_sync_complete = 1;
4034
4035 LEAVE;
4036 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4037}
4038
4039/**
4040 * ipr_eh_abort - Abort a single op
4041 * @scsi_cmd: scsi command struct
4042 *
4043 * Return value:
4044 * SUCCESS / FAILED
4045 **/
4046static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4047{
4048 unsigned long flags;
4049 int rc;
4050
4051 ENTER;
4052
4053 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4054 rc = ipr_cancel_op(scsi_cmd);
4055 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4056
4057 LEAVE;
4058 return rc;
4059}
4060
4061/**
4062 * ipr_handle_other_interrupt - Handle "other" interrupts
4063 * @ioa_cfg: ioa config struct
4064 * @int_reg: interrupt register
4065 *
4066 * Return value:
4067 * IRQ_NONE / IRQ_HANDLED
4068 **/
4069static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4070 volatile u32 int_reg)
4071{
4072 irqreturn_t rc = IRQ_HANDLED;
4073
4074 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4075 /* Mask the interrupt */
4076 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4077
4078 /* Clear the interrupt */
4079 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4080 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4081
4082 list_del(&ioa_cfg->reset_cmd->queue);
4083 del_timer(&ioa_cfg->reset_cmd->timer);
4084 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4085 } else {
4086 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4087 ioa_cfg->ioa_unit_checked = 1;
4088 else
4089 dev_err(&ioa_cfg->pdev->dev,
4090 "Permanent IOA failure. 0x%08X\n", int_reg);
4091
4092 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4093 ioa_cfg->sdt_state = GET_DUMP;
4094
4095 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4096 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4097 }
4098
4099 return rc;
4100}
4101
4102/**
4103 * ipr_isr - Interrupt service routine
4104 * @irq: irq number
4105 * @devp: pointer to ioa config struct
4106 *
4107 * Return value:
4108 * IRQ_NONE / IRQ_HANDLED
4109 **/
4110static irqreturn_t ipr_isr(int irq, void *devp)
4111{
4112 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4113 unsigned long lock_flags = 0;
4114 volatile u32 int_reg, int_mask_reg;
4115 u32 ioasc;
4116 u16 cmd_index;
4117 struct ipr_cmnd *ipr_cmd;
4118 irqreturn_t rc = IRQ_NONE;
4119
4120 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4121
4122 /* If interrupts are disabled, ignore the interrupt */
4123 if (!ioa_cfg->allow_interrupts) {
4124 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4125 return IRQ_NONE;
4126 }
4127
4128 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4129 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4130
4131 /* If an interrupt on the adapter did not occur, ignore it */
4132 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4134 return IRQ_NONE;
4135 }
4136
4137 while (1) {
4138 ipr_cmd = NULL;
4139
4140 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4141 ioa_cfg->toggle_bit) {
4142
4143 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4144 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4145
4146 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4147 ioa_cfg->errors_logged++;
4148 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4149
4150 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4151 ioa_cfg->sdt_state = GET_DUMP;
4152
4153 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4155 return IRQ_HANDLED;
4156 }
4157
4158 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4159
4160 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4161
4162 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4163
4164 list_del(&ipr_cmd->queue);
4165 del_timer(&ipr_cmd->timer);
4166 ipr_cmd->done(ipr_cmd);
4167
4168 rc = IRQ_HANDLED;
4169
4170 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4171 ioa_cfg->hrrq_curr++;
4172 } else {
4173 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4174 ioa_cfg->toggle_bit ^= 1u;
4175 }
4176 }
4177
4178 if (ipr_cmd != NULL) {
4179 /* Clear the PCI interrupt */
4180 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4181 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4182 } else
4183 break;
4184 }
4185
4186 if (unlikely(rc == IRQ_NONE))
4187 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4188
4189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4190 return rc;
4191}
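/*
 * Editor's sketch of the HRRQ consumption protocol used by ipr_isr()
 * above, reduced to its essentials. Not compiled into the driver; the
 * helper name is illustrative only.
 */
#if 0
static void ipr_hrrq_consume_sketch(struct ipr_ioa_cfg *ioa_cfg)
{
	/*
	 * Each response queue entry carries a toggle bit. The adapter
	 * inverts the sense of the bit every time it wraps the ring, so
	 * an entry is "new" exactly while its toggle bit matches
	 * ioa_cfg->toggle_bit; no shared producer index is required.
	 */
	while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       ioa_cfg->toggle_bit) {
		/* ... look up the ipr_cmnd here and complete it ... */
		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
			ioa_cfg->hrrq_curr++;	/* advance within the ring */
		} else {
			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start; /* wrap */
			ioa_cfg->toggle_bit ^= 1u; /* expect flipped bit */
		}
	}
}
#endif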
4192
4193/**
4194 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4195 * @ioa_cfg: ioa config struct
4196 * @ipr_cmd: ipr command struct
4197 *
4198 * Return value:
4199 * 0 on success / -1 on failure
4200 **/
4201static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4202 struct ipr_cmnd *ipr_cmd)
4203{
4204 int i;
4205 struct scatterlist *sglist;
4206 u32 length;
4207 u32 ioadl_flags = 0;
4208 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4209 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4210 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4211
4212 length = scsi_cmd->request_bufflen;
4213
4214 if (length == 0)
4215 return 0;
4216
4217 if (scsi_cmd->use_sg) {
4218 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4219 scsi_cmd->request_buffer,
4220 scsi_cmd->use_sg,
4221 scsi_cmd->sc_data_direction);
4222
4223 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4224 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4225 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4226 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4227 ioarcb->write_ioadl_len =
4228 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4229 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4230 ioadl_flags = IPR_IOADL_FLAGS_READ;
4231 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4232 ioarcb->read_ioadl_len =
4233 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4234 }
4235
4236 sglist = scsi_cmd->request_buffer;
4237
4238 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4239 ioadl = ioarcb->add_data.u.ioadl;
4240 ioarcb->write_ioadl_addr =
4241 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4242 offsetof(struct ipr_ioarcb, add_data));
4243 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4244 }
4245
4246 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4247 ioadl[i].flags_and_data_len =
4248 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4249 ioadl[i].address =
4250 cpu_to_be32(sg_dma_address(&sglist[i]));
4251 }
4252
4253 if (likely(ipr_cmd->dma_use_sg)) {
4254 ioadl[i-1].flags_and_data_len |=
4255 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4256 return 0;
4257 } else
4258 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4259 } else {
4260 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4261 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4262 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4263 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4264 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4265 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4266 ioadl_flags = IPR_IOADL_FLAGS_READ;
4267 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4268 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4269 }
4270
4271 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4272 scsi_cmd->request_buffer, length,
4273 scsi_cmd->sc_data_direction);
4274
4275 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4276 ioadl = ioarcb->add_data.u.ioadl;
4277 ioarcb->write_ioadl_addr =
4278 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4279 offsetof(struct ipr_ioarcb, add_data));
4280 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4281 ipr_cmd->dma_use_sg = 1;
4282 ioadl[0].flags_and_data_len =
4283 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4284 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4285 return 0;
4286 } else
4287 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4288 }
4289
4290 return -1;
4291}
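/*
 * Editor's note on the encoding above: each IOADL element packs the flags
 * and byte count into one big-endian word, so a 4 KiB read segment becomes
 * cpu_to_be32(IPR_IOADL_FLAGS_READ | 0x1000), and the final element also
 * ORs in IPR_IOADL_FLAGS_LAST to terminate the list. Lists short enough
 * to fit in add_data.u.ioadl are embedded directly in the IOARCB, sparing
 * the adapter a separate DMA fetch of the descriptor list.
 */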
4292
4293/**
4294 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4295 * @scsi_cmd: scsi command struct
4296 *
4297 * Return value:
4298 * task attributes
4299 **/
4300static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4301{
4302 u8 tag[2];
4303 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4304
4305 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4306 switch (tag[0]) {
4307 case MSG_SIMPLE_TAG:
4308 rc = IPR_FLAGS_LO_SIMPLE_TASK;
4309 break;
4310 case MSG_HEAD_TAG:
4311 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4312 break;
4313 case MSG_ORDERED_TAG:
4314 rc = IPR_FLAGS_LO_ORDERED_TASK;
4315 break;
4316 }
4317 }
4318
4319 return rc;
4320}
4321
4322/**
4323 * ipr_erp_done - Process completion of ERP for a device
4324 * @ipr_cmd: ipr command struct
4325 *
4326 * This function copies the sense buffer into the scsi_cmd
4327 * struct and pushes the scsi_done function.
4328 *
4329 * Return value:
4330 * nothing
4331 **/
4332static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4333{
4334 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4335 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4336 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4337 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4338
4339 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4340 scsi_cmd->result |= (DID_ERROR << 16);
4341 scmd_printk(KERN_ERR, scsi_cmd,
4342 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4343 } else {
4344 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4345 SCSI_SENSE_BUFFERSIZE);
4346 }
4347
4348 if (res) {
4349 if (!ipr_is_naca_model(res))
4350 res->needs_sync_complete = 1;
4351 res->in_erp = 0;
4352 }
4353 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4354 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4355 scsi_cmd->scsi_done(scsi_cmd);
4356}
4357
4358/**
4359 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4360 * @ipr_cmd: ipr command struct
4361 *
4362 * Return value:
4363 * none
4364 **/
4365static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4366{
4367 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4368 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4369 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4370
4371 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4372 ioarcb->write_data_transfer_length = 0;
4373 ioarcb->read_data_transfer_length = 0;
4374 ioarcb->write_ioadl_len = 0;
4375 ioarcb->read_ioadl_len = 0;
4376 ioasa->ioasc = 0;
4377 ioasa->residual_data_len = 0;
4378 ioarcb->write_ioadl_addr =
4379 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4380 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4381}
4382
4383/**
4384 * ipr_erp_request_sense - Send request sense to a device
4385 * @ipr_cmd: ipr command struct
4386 *
4387 * This function sends a request sense to a device as a result
4388 * of a check condition.
4389 *
4390 * Return value:
4391 * nothing
4392 **/
4393static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4394{
4395 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4396 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4397
4398 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4399 ipr_erp_done(ipr_cmd);
4400 return;
4401 }
4402
4403 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4404
4405 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4406 cmd_pkt->cdb[0] = REQUEST_SENSE;
4407 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4408 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4409 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4410 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4411
4412 ipr_cmd->ioadl[0].flags_and_data_len =
4413 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4414 ipr_cmd->ioadl[0].address =
4415 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4416
4417 ipr_cmd->ioarcb.read_ioadl_len =
4418 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4419 ipr_cmd->ioarcb.read_data_transfer_length =
4420 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4421
4422 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4423 IPR_REQUEST_SENSE_TIMEOUT * 2);
4424}
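/*
 * Editor's note: the CDB assembled above is the standard 6-byte REQUEST
 * SENSE. Assuming SCSI_SENSE_BUFFERSIZE is 96 (its usual value in this
 * era), the wire bytes are 03 00 00 00 60 00 -- byte 0 the opcode, byte 4
 * the allocation length -- and the single IOADL element steers the
 * returned sense data straight into ipr_cmd->sense_buffer_dma.
 */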
4425
4426/**
4427 * ipr_erp_cancel_all - Send cancel all to a device
4428 * @ipr_cmd: ipr command struct
4429 *
4430 * This function sends a cancel all to a device to clear the
4431 * queue. If we are running TCQ on the device, QERR is set to 1,
4432 * which means all outstanding ops have been dropped on the floor.
4433 * Cancel all will return them to us.
4434 *
4435 * Return value:
4436 * nothing
4437 **/
4438static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4439{
4440 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4441 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4442 struct ipr_cmd_pkt *cmd_pkt;
4443
4444 res->in_erp = 1;
4445
4446 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4447
4448 if (!scsi_get_tag_type(scsi_cmd->device)) {
4449 ipr_erp_request_sense(ipr_cmd);
4450 return;
4451 }
4452
4453 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4454 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4455 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4456
4457 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4458 IPR_CANCEL_ALL_TIMEOUT);
4459}
4460
4461/**
4462 * ipr_dump_ioasa - Dump contents of IOASA
4463 * @ioa_cfg: ioa config struct
4464 * @ipr_cmd: ipr command struct
4465 * @res: resource entry struct
4466 *
4467 * This function is invoked by the interrupt handler when ops
4468 * fail. It will log the IOASA if appropriate. Only called
4469 * for GPDD ops.
4470 *
4471 * Return value:
4472 * none
4473 **/
4474static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4475 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4476{
4477 int i;
4478 u16 data_len;
4479 u32 ioasc, fd_ioasc;
4480 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4481 __be32 *ioasa_data = (__be32 *)ioasa;
4482 int error_index;
4483
4484 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4485 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4486
4487 if (0 == ioasc)
4488 return;
4489
4490 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4491 return;
4492
4493 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4494 error_index = ipr_get_error(fd_ioasc);
4495 else
4496 error_index = ipr_get_error(ioasc);
4497
4498 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4499 /* Don't log an error if the IOA already logged one */
4500 if (ioasa->ilid != 0)
4501 return;
4502
4503 if (!ipr_is_gscsi(res))
4504 return;
4505
4506 if (ipr_error_table[error_index].log_ioasa == 0)
4507 return;
4508 }
4509
4510 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4511
4512 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4513 data_len = sizeof(struct ipr_ioasa);
4514 else
4515 data_len = be16_to_cpu(ioasa->ret_stat_len);
4516
4517 ipr_err("IOASA Dump:\n");
4518
4519 for (i = 0; i < data_len / 4; i += 4) {
4520 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4521 be32_to_cpu(ioasa_data[i]),
4522 be32_to_cpu(ioasa_data[i+1]),
4523 be32_to_cpu(ioasa_data[i+2]),
4524 be32_to_cpu(ioasa_data[i+3]));
4525 }
4526}
4527
4528/**
4529 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4530 * @ipr_cmd: ipr command struct
4532 *
4533 * Return value:
4534 * none
4535 **/
4536static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4537{
4538 u32 failing_lba;
4539 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4540 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4541 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4542 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4543
4544 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4545
4546 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4547 return;
4548
4549 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4550
4551 if (ipr_is_vset_device(res) &&
4552 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4553 ioasa->u.vset.failing_lba_hi != 0) {
4554 sense_buf[0] = 0x72;
4555 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4556 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4557 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4558
4559 sense_buf[7] = 12;
4560 sense_buf[8] = 0;
4561 sense_buf[9] = 0x0A;
4562 sense_buf[10] = 0x80;
4563
4564 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4565
4566 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4567 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4568 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4569 sense_buf[15] = failing_lba & 0x000000ff;
4570
4571 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4572
4573 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4574 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4575 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4576 sense_buf[19] = failing_lba & 0x000000ff;
4577 } else {
4578 sense_buf[0] = 0x70;
4579 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4580 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4581 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4582
4583 /* Illegal request */
4584 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4585 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4586 sense_buf[7] = 10; /* additional length */
4587
4588 /* IOARCB was in error */
4589 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4590 sense_buf[15] = 0xC0;
4591 else /* Parameter data was invalid */
4592 sense_buf[15] = 0x80;
4593
4594 sense_buf[16] =
4595 ((IPR_FIELD_POINTER_MASK &
4596 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4597 sense_buf[17] =
4598 (IPR_FIELD_POINTER_MASK &
4599 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4600 } else {
4601 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4602 if (ipr_is_vset_device(res))
4603 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4604 else
4605 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4606
4607 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4608 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4609 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4610 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4611 sense_buf[6] = failing_lba & 0x000000ff;
4612 }
4613
4614 sense_buf[7] = 6; /* additional length */
4615 }
4616 }
4617}
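/*
 * Worked example (editor's sketch): suppose a vset device reports
 * IPR_IOASC_MED_DO_NOT_REALLOC for a hypothetical failing LBA of
 * 0x0000000123456789. failing_lba_hi is nonzero, so descriptor-format
 * sense (response code 0x72) is built with an information descriptor:
 * bytes 12-15 hold the high word 00 00 00 01 and bytes 16-19 the low
 * word 23 45 67 89. When the high word is zero, the fixed format (0x70)
 * is used instead and only the low 32 LBA bits fit, in bytes 3-6, with
 * the Valid bit ORed into byte 0.
 */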
4618
4619/**
4620 * ipr_get_autosense - Copy autosense data to sense buffer
4621 * @ipr_cmd: ipr command struct
4622 *
4623 * This function copies the autosense buffer to the buffer
4624 * in the scsi_cmd, if there is autosense available.
4625 *
4626 * Return value:
4627 * 1 if autosense was available / 0 if not
4628 **/
4629static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4630{
4631 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4632
4633 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4634 return 0;
4635
4636 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4637 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4638 SCSI_SENSE_BUFFERSIZE));
4639 return 1;
4640}
4641
4642/**
4643 * ipr_erp_start - Process an error response for a SCSI op
4644 * @ioa_cfg: ioa config struct
4645 * @ipr_cmd: ipr command struct
4646 *
4647 * This function determines whether or not to initiate ERP
4648 * on the affected device.
4649 *
4650 * Return value:
4651 * nothing
4652 **/
4653static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4654 struct ipr_cmnd *ipr_cmd)
4655{
4656 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4657 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4658 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4659
4660 if (!res) {
4661 ipr_scsi_eh_done(ipr_cmd);
4662 return;
4663 }
4664
4665 if (!ipr_is_gscsi(res))
4666 ipr_gen_sense(ipr_cmd);
4667
4668 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4669
4670 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4671 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4672 if (ipr_is_naca_model(res))
4673 scsi_cmd->result |= (DID_ABORT << 16);
4674 else
4675 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4676 break;
4677 case IPR_IOASC_IR_RESOURCE_HANDLE:
4678 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4679 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4680 break;
4681 case IPR_IOASC_HW_SEL_TIMEOUT:
4682 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4683 if (!ipr_is_naca_model(res))
4684 res->needs_sync_complete = 1;
4685 break;
4686 case IPR_IOASC_SYNC_REQUIRED:
4687 if (!res->in_erp)
4688 res->needs_sync_complete = 1;
4689 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4690 break;
4691 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4692 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4693 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4694 break;
4695 case IPR_IOASC_BUS_WAS_RESET:
4696 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4697 /*
4698 * Report the bus reset and ask for a retry. The device
4699 * will give CC/UA the next command.
4700 */
4701 if (!res->resetting_device)
4702 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4703 scsi_cmd->result |= (DID_ERROR << 16);
4704 if (!ipr_is_naca_model(res))
4705 res->needs_sync_complete = 1;
4706 break;
4707 case IPR_IOASC_HW_DEV_BUS_STATUS:
4708 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4709 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4710 if (!ipr_get_autosense(ipr_cmd)) {
4711 if (!ipr_is_naca_model(res)) {
4712 ipr_erp_cancel_all(ipr_cmd);
4713 return;
4714 }
4715 }
4716 }
4717 if (!ipr_is_naca_model(res))
4718 res->needs_sync_complete = 1;
4719 break;
4720 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4721 break;
4722 default:
4723 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4724 scsi_cmd->result |= (DID_ERROR << 16);
4725 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4726 res->needs_sync_complete = 1;
4727 break;
4728 }
4729
4730 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4731 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4732 scsi_cmd->scsi_done(scsi_cmd);
4733}
4734
4735/**
4736 * ipr_scsi_done - mid-layer done function
4737 * @ipr_cmd: ipr command struct
4738 *
4739 * This function is invoked by the interrupt handler for
4740 * ops generated by the SCSI mid-layer
4741 *
4742 * Return value:
4743 * none
4744 **/
4745static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4746{
4747 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4748 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4749 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4750
4751 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4752
4753 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4754 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4755 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4756 scsi_cmd->scsi_done(scsi_cmd);
4757 } else
4758 ipr_erp_start(ioa_cfg, ipr_cmd);
4759}
4760
4761/**
4762 * ipr_queuecommand - Queue a mid-layer request
4763 * @scsi_cmd: scsi command struct
4764 * @done: done function
4765 *
4766 * This function queues a request generated by the mid-layer.
4767 *
4768 * Return value:
4769 * 0 on success
4770 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4771 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4772 **/
4773static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4774 void (*done) (struct scsi_cmnd *))
4775{
4776 struct ipr_ioa_cfg *ioa_cfg;
4777 struct ipr_resource_entry *res;
4778 struct ipr_ioarcb *ioarcb;
4779 struct ipr_cmnd *ipr_cmd;
4780 int rc = 0;
4781
4782 scsi_cmd->scsi_done = done;
4783 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4784 res = scsi_cmd->device->hostdata;
4785 scsi_cmd->result = (DID_OK << 16);
4786
4787 /*
4788 * We are currently blocking all devices due to a host reset.
4789 * We have told the host to stop giving us new requests, but
4790 * ERP ops don't count. FIXME
4791 */
4792 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4793 return SCSI_MLQUEUE_HOST_BUSY;
4794
4795 /*
4796 * FIXME - Create scsi_set_host_offline interface
4797 * and the ioa_is_dead check can be removed
4798 */
4799 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4800 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4801 scsi_cmd->result = (DID_NO_CONNECT << 16);
4802 scsi_cmd->scsi_done(scsi_cmd);
4803 return 0;
4804 }
4805
4806 if (ipr_is_gata(res) && res->sata_port)
4807 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4808
4809 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4810 ioarcb = &ipr_cmd->ioarcb;
4811 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4812
4813 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4814 ipr_cmd->scsi_cmd = scsi_cmd;
4815 ioarcb->res_handle = res->cfgte.res_handle;
4816 ipr_cmd->done = ipr_scsi_done;
4817 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4818
4819 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4820 if (scsi_cmd->underflow == 0)
4821 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4822
4823 if (res->needs_sync_complete) {
4824 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4825 res->needs_sync_complete = 0;
4826 }
4827
4828 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4829 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4830 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4831 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4832 }
4833
4834 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4835 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4836 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4837
1da177e4
LT
4838 if (likely(rc == 0))
4839 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4840
4841 if (likely(rc == 0)) {
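		/*
		 * Editor's note: mb() orders the IOARCB/IOADL stores ahead
		 * of the ioarrin doorbell write below, so the adapter can
		 * never fetch a half-initialized request block.
		 */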
4842 mb();
4843 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4844 ioa_cfg->regs.ioarrin_reg);
4845 } else {
4846 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4847 return SCSI_MLQUEUE_HOST_BUSY;
4848 }
4849
4850 return 0;
4851}
4852
4853/**
4854 * ipr_ioctl - IOCTL handler
4855 * @sdev: scsi device struct
4856 * @cmd: IOCTL cmd
4857 * @arg: IOCTL arg
4858 *
4859 * Return value:
4860 * 0 on success / other on failure
4861 **/
4862static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4863{
4864 struct ipr_resource_entry *res;
4865
4866 res = (struct ipr_resource_entry *)sdev->hostdata;
4867 if (res && ipr_is_gata(res))
4868 return ata_scsi_ioctl(sdev, cmd, arg);
4869
4870 return -EINVAL;
4871}
4872
4873/**
4874 * ipr_ioa_info - Get information about the card/driver
4875 * @host: scsi host struct
4876 *
4877 * Return value:
4878 * pointer to buffer with description string
4879 **/
4880static const char * ipr_ioa_info(struct Scsi_Host *host)
4881{
4882 static char buffer[512];
4883 struct ipr_ioa_cfg *ioa_cfg;
4884 unsigned long lock_flags = 0;
4885
4886 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4887
4888 spin_lock_irqsave(host->host_lock, lock_flags);
4889 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4890 spin_unlock_irqrestore(host->host_lock, lock_flags);
4891
4892 return buffer;
4893}
4894
4895static struct scsi_host_template driver_template = {
4896 .module = THIS_MODULE,
4897 .name = "IPR",
4898 .info = ipr_ioa_info,
4899 .ioctl = ipr_ioctl,
4900 .queuecommand = ipr_queuecommand,
4901 .eh_abort_handler = ipr_eh_abort,
4902 .eh_device_reset_handler = ipr_eh_dev_reset,
4903 .eh_host_reset_handler = ipr_eh_host_reset,
4904 .slave_alloc = ipr_slave_alloc,
4905 .slave_configure = ipr_slave_configure,
4906 .slave_destroy = ipr_slave_destroy,
4907 .target_alloc = ipr_target_alloc,
4908 .target_destroy = ipr_target_destroy,
4909 .change_queue_depth = ipr_change_queue_depth,
4910 .change_queue_type = ipr_change_queue_type,
4911 .bios_param = ipr_biosparam,
4912 .can_queue = IPR_MAX_COMMANDS,
4913 .this_id = -1,
4914 .sg_tablesize = IPR_MAX_SGLIST,
4915 .max_sectors = IPR_IOA_MAX_SECTORS,
4916 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4917 .use_clustering = ENABLE_CLUSTERING,
4918 .shost_attrs = ipr_ioa_attrs,
4919 .sdev_attrs = ipr_dev_attrs,
4920 .proc_name = IPR_NAME
4921};
4922
4923/**
4924 * ipr_ata_phy_reset - libata phy_reset handler
4925 * @ap: ata port to reset
4926 *
4927 **/
4928static void ipr_ata_phy_reset(struct ata_port *ap)
4929{
4930 unsigned long flags;
4931 struct ipr_sata_port *sata_port = ap->private_data;
4932 struct ipr_resource_entry *res = sata_port->res;
4933 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4934 int rc;
4935
4936 ENTER;
4937 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4938 while(ioa_cfg->in_reset_reload) {
4939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4940 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4941 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4942 }
4943
4944 if (!ioa_cfg->allow_cmds)
4945 goto out_unlock;
4946
4947 rc = ipr_device_reset(ioa_cfg, res);
4948
4949 if (rc) {
4950 ap->ops->port_disable(ap);
4951 goto out_unlock;
4952 }
4953
4954 switch(res->cfgte.proto) {
4955 case IPR_PROTO_SATA:
4956 case IPR_PROTO_SAS_STP:
4957 ap->device[0].class = ATA_DEV_ATA;
4958 break;
4959 case IPR_PROTO_SATA_ATAPI:
4960 case IPR_PROTO_SAS_STP_ATAPI:
4961 ap->device[0].class = ATA_DEV_ATAPI;
4962 break;
4963 default:
4964 ap->device[0].class = ATA_DEV_UNKNOWN;
4965 ap->ops->port_disable(ap);
4966 break;
4967 }
4968
4969out_unlock:
4970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4971 LEAVE;
4972}
4973
4974/**
4975 * ipr_ata_post_internal - Cleanup after an internal command
4976 * @qc: ATA queued command
4977 *
4978 * Return value:
4979 * none
4980 **/
4981static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4982{
4983 struct ipr_sata_port *sata_port = qc->ap->private_data;
4984 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4985 struct ipr_cmnd *ipr_cmd;
4986 unsigned long flags;
4987
4988 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4989 while(ioa_cfg->in_reset_reload) {
4990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4991 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4992 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4993 }
4994
4995 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4996 if (ipr_cmd->qc == qc) {
4997 ipr_device_reset(ioa_cfg, sata_port->res);
4998 break;
4999 }
5000 }
5001 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5002}
5003
5004/**
5005 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5006 * @ap: ATA port
5007 * @tf: destination ATA taskfile
5008 *
5009 * Return value:
5010 * none
5011 **/
5012static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5013{
5014 struct ipr_sata_port *sata_port = ap->private_data;
5015 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5016
5017 tf->feature = g->error;
5018 tf->nsect = g->nsect;
5019 tf->lbal = g->lbal;
5020 tf->lbam = g->lbam;
5021 tf->lbah = g->lbah;
5022 tf->device = g->device;
5023 tf->command = g->status;
5024 tf->hob_nsect = g->hob_nsect;
5025 tf->hob_lbal = g->hob_lbal;
5026 tf->hob_lbam = g->hob_lbam;
5027 tf->hob_lbah = g->hob_lbah;
5028 tf->ctl = g->alt_status;
5029}
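/*
 * Editor's note: the IOASA GATA area snapshots the device's shadow
 * registers, so the read-side values map onto their write-side twins
 * here -- error is returned through tf->feature and status through
 * tf->command -- because each pair shares a single taskfile register
 * (read view vs. write view).
 */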
5030
5031/**
5032 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5033 * @regs: destination
5034 * @tf: source ATA taskfile
5035 *
5036 * Return value:
5037 * none
5038 **/
5039static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5040 struct ata_taskfile *tf)
5041{
5042 regs->feature = tf->feature;
5043 regs->nsect = tf->nsect;
5044 regs->lbal = tf->lbal;
5045 regs->lbam = tf->lbam;
5046 regs->lbah = tf->lbah;
5047 regs->device = tf->device;
5048 regs->command = tf->command;
5049 regs->hob_feature = tf->hob_feature;
5050 regs->hob_nsect = tf->hob_nsect;
5051 regs->hob_lbal = tf->hob_lbal;
5052 regs->hob_lbam = tf->hob_lbam;
5053 regs->hob_lbah = tf->hob_lbah;
5054 regs->ctl = tf->ctl;
5055}
5056
5057/**
5058 * ipr_sata_done - done function for SATA commands
5059 * @ipr_cmd: ipr command struct
5060 *
5061 * This function is invoked by the interrupt handler for
5062 * ops generated by the SCSI mid-layer to SATA devices
5063 *
5064 * Return value:
5065 * none
5066 **/
5067static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5068{
5069 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5070 struct ata_queued_cmd *qc = ipr_cmd->qc;
5071 struct ipr_sata_port *sata_port = qc->ap->private_data;
5072 struct ipr_resource_entry *res = sata_port->res;
5073 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5074
5075 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5076 sizeof(struct ipr_ioasa_gata));
5077 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5078
5079 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5080 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5081 res->cfgte.res_addr.target);
5082
5083 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5084 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5085 else
5086 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5087 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5088 ata_qc_complete(qc);
5089}
5090
5091/**
5092 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5093 * @ipr_cmd: ipr command struct
5094 * @qc: ATA queued command
5095 *
5096 **/
5097static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5098 struct ata_queued_cmd *qc)
5099{
5100 u32 ioadl_flags = 0;
5101 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5102 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5103 int len = qc->nbytes + qc->pad_len;
5104 struct scatterlist *sg;
5105
5106 if (len == 0)
5107 return;
5108
5109 if (qc->dma_dir == DMA_TO_DEVICE) {
5110 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5111 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5112 ioarcb->write_data_transfer_length = cpu_to_be32(len);
5113 ioarcb->write_ioadl_len =
5114 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5115 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5116 ioadl_flags = IPR_IOADL_FLAGS_READ;
5117 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5118 ioarcb->read_ioadl_len =
5119 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5120 }
5121
5122 ata_for_each_sg(sg, qc) {
5123 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5124 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5125 if (ata_sg_is_last(sg, qc))
5126 ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5127 else
5128 ioadl++;
5129 }
5130}
5131
5132/**
5133 * ipr_qc_issue - Issue a SATA qc to a device
5134 * @qc: queued command
5135 *
5136 * Return value:
5137 * 0 on success / AC_ERR_* failure code on error
5138 **/
5139static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5140{
5141 struct ata_port *ap = qc->ap;
5142 struct ipr_sata_port *sata_port = ap->private_data;
5143 struct ipr_resource_entry *res = sata_port->res;
5144 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5145 struct ipr_cmnd *ipr_cmd;
5146 struct ipr_ioarcb *ioarcb;
5147 struct ipr_ioarcb_ata_regs *regs;
5148
5149 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5150 return AC_ERR_SYSTEM;
5151
5152 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5153 ioarcb = &ipr_cmd->ioarcb;
5154 regs = &ioarcb->add_data.u.regs;
5155
5156 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5157 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5158
5159 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5160 ipr_cmd->qc = qc;
5161 ipr_cmd->done = ipr_sata_done;
5162 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5163 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5164 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5165 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5166 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5167
5168 ipr_build_ata_ioadl(ipr_cmd, qc);
5169 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5170 ipr_copy_sata_tf(regs, &qc->tf);
5171 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5172 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5173
5174 switch (qc->tf.protocol) {
5175 case ATA_PROT_NODATA:
5176 case ATA_PROT_PIO:
5177 break;
5178
5179 case ATA_PROT_DMA:
5180 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5181 break;
5182
5183 case ATA_PROT_ATAPI:
5184 case ATA_PROT_ATAPI_NODATA:
5185 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5186 break;
5187
5188 case ATA_PROT_ATAPI_DMA:
5189 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5190 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5191 break;
5192
5193 default:
5194 WARN_ON(1);
5195 return AC_ERR_INVALID;
5196 }
5197
5198 mb();
5199 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5200 ioa_cfg->regs.ioarrin_reg);
5201 return 0;
5202}
5203
5204/**
5205 * ipr_ata_check_status - Return last ATA status
5206 * @ap: ATA port
5207 *
5208 * Return value:
5209 * ATA status
5210 **/
5211static u8 ipr_ata_check_status(struct ata_port *ap)
5212{
5213 struct ipr_sata_port *sata_port = ap->private_data;
5214 return sata_port->ioasa.status;
5215}
5216
5217/**
5218 * ipr_ata_check_altstatus - Return last ATA altstatus
5219 * @ap: ATA port
5220 *
5221 * Return value:
5222 * Alt ATA status
5223 **/
5224static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5225{
5226 struct ipr_sata_port *sata_port = ap->private_data;
5227 return sata_port->ioasa.alt_status;
5228}
5229
5230static struct ata_port_operations ipr_sata_ops = {
5231 .port_disable = ata_port_disable,
5232 .check_status = ipr_ata_check_status,
5233 .check_altstatus = ipr_ata_check_altstatus,
5234 .dev_select = ata_noop_dev_select,
5235 .phy_reset = ipr_ata_phy_reset,
5236 .post_internal_cmd = ipr_ata_post_internal,
5237 .tf_read = ipr_tf_read,
5238 .qc_prep = ata_noop_qc_prep,
5239 .qc_issue = ipr_qc_issue,
5240 .port_start = ata_sas_port_start,
5241 .port_stop = ata_sas_port_stop
5242};
5243
5244static struct ata_port_info sata_port_info = {
5245 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5246 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5247 .pio_mask = 0x10, /* pio4 */
5248 .mwdma_mask = 0x07,
5249 .udma_mask = 0x7f, /* udma0-6 */
5250 .port_ops = &ipr_sata_ops
5251};
5252
5253#ifdef CONFIG_PPC_PSERIES
5254static const u16 ipr_blocked_processors[] = {
5255 PV_NORTHSTAR,
5256 PV_PULSAR,
5257 PV_POWER4,
5258 PV_ICESTAR,
5259 PV_SSTAR,
5260 PV_POWER4p,
5261 PV_630,
5262 PV_630p
5263};
5264
5265/**
5266 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5267 * @ioa_cfg: ioa cfg struct
5268 *
5269 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5270 * certain pSeries hardware. This function determines if the given
5271 * adapter is in one of these configurations or not.
5272 *
5273 * Return value:
5274 * 1 if adapter is not supported / 0 if adapter is supported
5275 **/
5276static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5277{
5278 u8 rev_id;
5279 int i;
5280
5281 if (ioa_cfg->type == 0x5702) {
5282 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5283 &rev_id) == PCIBIOS_SUCCESSFUL) {
5284 if (rev_id < 4) {
5285 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5286 if (__is_processor(ipr_blocked_processors[i]))
5287 return 1;
5288 }
5289 }
5290 }
5291 }
5292 return 0;
5293}
5294#else
5295#define ipr_invalid_adapter(ioa_cfg) 0
5296#endif
5297
5298/**
5299 * ipr_ioa_bringdown_done - IOA bring down completion.
5300 * @ipr_cmd: ipr command struct
5301 *
5302 * This function processes the completion of an adapter bring down.
5303 * It wakes any reset sleepers.
5304 *
5305 * Return value:
5306 * IPR_RC_JOB_RETURN
5307 **/
5308static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5309{
5310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5311
5312 ENTER;
5313 ioa_cfg->in_reset_reload = 0;
5314 ioa_cfg->reset_retries = 0;
5315 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5316 wake_up_all(&ioa_cfg->reset_wait_q);
5317
5318 spin_unlock_irq(ioa_cfg->host->host_lock);
5319 scsi_unblock_requests(ioa_cfg->host);
5320 spin_lock_irq(ioa_cfg->host->host_lock);
5321 LEAVE;
5322
5323 return IPR_RC_JOB_RETURN;
5324}
5325
5326/**
5327 * ipr_ioa_reset_done - IOA reset completion.
5328 * @ipr_cmd: ipr command struct
5329 *
5330 * This function processes the completion of an adapter reset.
5331 * It schedules any necessary mid-layer add/removes and
5332 * wakes any reset sleepers.
5333 *
5334 * Return value:
5335 * IPR_RC_JOB_RETURN
5336 **/
5337static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5338{
5339 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5340 struct ipr_resource_entry *res;
5341 struct ipr_hostrcb *hostrcb, *temp;
5342 int i = 0;
5343
5344 ENTER;
5345 ioa_cfg->in_reset_reload = 0;
5346 ioa_cfg->allow_cmds = 1;
5347 ioa_cfg->reset_cmd = NULL;
5348	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5349
5350 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5351 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5352 ipr_trace;
5353 break;
5354 }
5355 }
5356 schedule_work(&ioa_cfg->work_q);
5357
5358 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5359 list_del(&hostrcb->queue);
5360 if (i++ < IPR_NUM_LOG_HCAMS)
5361 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5362 else
5363 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5364 }
5365
5366 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5367
5368 ioa_cfg->reset_retries = 0;
5369 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5370 wake_up_all(&ioa_cfg->reset_wait_q);
5371
5372 spin_unlock_irq(ioa_cfg->host->host_lock);
5373 scsi_unblock_requests(ioa_cfg->host);
5374 spin_lock_irq(ioa_cfg->host->host_lock);
5375
5376 if (!ioa_cfg->allow_cmds)
5377 scsi_block_requests(ioa_cfg->host);
5378
5379 LEAVE;
5380 return IPR_RC_JOB_RETURN;
5381}
5382
5383/**
5384 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5385 * @supported_dev: supported device struct
5386 * @vpids: vendor product id struct
5387 *
5388 * Return value:
5389 * none
5390 **/
5391static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5392 struct ipr_std_inq_vpids *vpids)
5393{
5394 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5395 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5396 supported_dev->num_records = 1;
5397 supported_dev->data_length =
5398 cpu_to_be16(sizeof(struct ipr_supported_device));
5399 supported_dev->reserved = 0;
5400}
5401
5402/**
5403 * ipr_set_supported_devs - Send Set Supported Devices for a device
5404 * @ipr_cmd: ipr command struct
5405 *
5406 * This function sends a Set Supported Devices command to the adapter.
5407 *
5408 * Return value:
5409 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5410 **/
5411static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5412{
5413 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5414 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5415 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5416 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5417 struct ipr_resource_entry *res = ipr_cmd->u.res;
5418
5419 ipr_cmd->job_step = ipr_ioa_reset_done;
5420
5421 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5422		if (!ipr_is_scsi_disk(res))
5423 continue;
5424
5425 ipr_cmd->u.res = res;
5426 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5427
5428 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5429 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5430 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5431
5432 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5433 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5434 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5435
5436 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5437 sizeof(struct ipr_supported_device));
5438 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5439 offsetof(struct ipr_misc_cbs, supp_dev));
5440 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5441 ioarcb->write_data_transfer_length =
5442 cpu_to_be32(sizeof(struct ipr_supported_device));
5443
5444 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5445 IPR_SET_SUP_DEVICE_TIMEOUT);
5446
5447 ipr_cmd->job_step = ipr_set_supported_devs;
5448 return IPR_RC_JOB_RETURN;
5449 }
5450
5451 return IPR_RC_JOB_CONTINUE;
5452}
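
/*
 * Note (added for clarity): ipr_set_supported_devs drives itself once
 * per disk. ipr_cmd->u.res remembers where the walk stopped, and
 * list_for_each_entry_continue() resumes from the next entry, so each
 * IPR_SET_SUPPORTED_DEVICES completion re-enters this step and advances
 * exactly one device; once the list is exhausted, IPR_RC_JOB_CONTINUE
 * falls through to ipr_ioa_reset_done.
 */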
5453
5454/**
5455 * ipr_setup_write_cache - Disable write cache if needed
5456 * @ipr_cmd: ipr command struct
5457 *
5458 * This function sets up the adapter's write cache to the desired setting.
5459 *
5460 * Return value:
5461 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5462 **/
5463static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5464{
5465 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5466
5467 ipr_cmd->job_step = ipr_set_supported_devs;
5468 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5469 struct ipr_resource_entry, queue);
5470
5471 if (ioa_cfg->cache_state != CACHE_DISABLED)
5472 return IPR_RC_JOB_CONTINUE;
5473
5474 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5475 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5476 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5477 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5478
5479 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5480
5481 return IPR_RC_JOB_RETURN;
5482}
5483
5484/**
5485 * ipr_get_mode_page - Locate specified mode page
5486 * @mode_pages: mode page buffer
5487 * @page_code: page code to find
5488 * @len: minimum required length for mode page
5489 *
5490 * Return value:
5491 * pointer to mode page / NULL on failure
5492 **/
5493static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5494 u32 page_code, u32 len)
5495{
5496 struct ipr_mode_page_hdr *mode_hdr;
5497 u32 page_length;
5498 u32 length;
5499
5500 if (!mode_pages || (mode_pages->hdr.length == 0))
5501 return NULL;
5502
5503 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5504 mode_hdr = (struct ipr_mode_page_hdr *)
5505 (mode_pages->data + mode_pages->hdr.block_desc_len);
5506
5507 while (length) {
5508 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5509 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5510 return mode_hdr;
5511 break;
5512 } else {
5513 page_length = (sizeof(struct ipr_mode_page_hdr) +
5514 mode_hdr->page_length);
5515 length -= page_length;
5516 mode_hdr = (struct ipr_mode_page_hdr *)
5517 ((unsigned long)mode_hdr + page_length);
5518 }
5519 }
5520 return NULL;
5521}
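
/*
 * Worked example of the length arithmetic above: for a MODE SENSE(6)
 * reply, hdr.length excludes itself, so the full parameter list is
 * hdr.length + 1 bytes; subtracting the 4-byte mode parameter header
 * and the block descriptors leaves only the pages. E.g. hdr.length = 35
 * and block_desc_len = 8 give (35 + 1) - 4 - 8 = 24 bytes of mode
 * pages to walk.
 */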
5522
5523/**
5524 * ipr_check_term_power - Check for term power errors
5525 * @ioa_cfg: ioa config struct
5526 * @mode_pages: IOAFP mode pages buffer
5527 *
5528 * Check the IOAFP's mode page 28 for term power errors
5529 *
5530 * Return value:
5531 * nothing
5532 **/
5533static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5534 struct ipr_mode_pages *mode_pages)
5535{
5536 int i;
5537 int entry_length;
5538 struct ipr_dev_bus_entry *bus;
5539 struct ipr_mode_page28 *mode_page;
5540
5541 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5542 sizeof(struct ipr_mode_page28));
5543
5544 entry_length = mode_page->entry_length;
5545
5546 bus = mode_page->bus;
5547
5548 for (i = 0; i < mode_page->num_entries; i++) {
5549 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5550 dev_err(&ioa_cfg->pdev->dev,
5551 "Term power is absent on scsi bus %d\n",
5552 bus->res_addr.bus);
5553 }
5554
5555 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5556 }
5557}
5558
5559/**
5560 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5561 * @ioa_cfg: ioa config struct
5562 *
5563 * Looks through the config table checking for SES devices. If
5564 * the SES device is in the SES table indicating a maximum SCSI
5565 * bus speed, the speed is limited for the bus.
5566 *
5567 * Return value:
5568 * none
5569 **/
5570static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5571{
5572 u32 max_xfer_rate;
5573 int i;
5574
5575 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5576 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5577 ioa_cfg->bus_attr[i].bus_width);
5578
5579 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5580 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5581 }
5582}
5583
5584/**
5585 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5586 * @ioa_cfg: ioa config struct
5587 * @mode_pages: mode page 28 buffer
5588 *
5589 * Updates mode page 28 based on driver configuration
5590 *
5591 * Return value:
5592 * none
5593 **/
5594static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5595 struct ipr_mode_pages *mode_pages)
5596{
5597 int i, entry_length;
5598 struct ipr_dev_bus_entry *bus;
5599 struct ipr_bus_attributes *bus_attr;
5600 struct ipr_mode_page28 *mode_page;
5601
5602 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5603 sizeof(struct ipr_mode_page28));
5604
5605 entry_length = mode_page->entry_length;
5606
5607 /* Loop for each device bus entry */
5608 for (i = 0, bus = mode_page->bus;
5609 i < mode_page->num_entries;
5610 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5611 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5612 dev_err(&ioa_cfg->pdev->dev,
5613 "Invalid resource address reported: 0x%08X\n",
5614 IPR_GET_PHYS_LOC(bus->res_addr));
5615 continue;
5616 }
5617
5618 bus_attr = &ioa_cfg->bus_attr[i];
5619 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5620 bus->bus_width = bus_attr->bus_width;
5621 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5622 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5623 if (bus_attr->qas_enabled)
5624 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5625 else
5626 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5627 }
5628}
5629
5630/**
5631 * ipr_build_mode_select - Build a mode select command
5632 * @ipr_cmd: ipr command struct
5633 * @res_handle: resource handle to send command to
5634 * @parm: Byte 1 of Mode Select command
5635 * @dma_addr: DMA buffer address
5636 * @xfer_len: data transfer length
5637 *
5638 * Return value:
5639 * none
5640 **/
5641static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5642 __be32 res_handle, u8 parm, u32 dma_addr,
5643 u8 xfer_len)
5644{
5645 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5646 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5647
5648 ioarcb->res_handle = res_handle;
5649 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5650 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5651 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5652 ioarcb->cmd_pkt.cdb[1] = parm;
5653 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5654
5655 ioadl->flags_and_data_len =
5656 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5657 ioadl->address = cpu_to_be32(dma_addr);
5658 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5659 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5660}
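
/*
 * Resulting MODE SELECT(6) CDB for reference (sketch derived from the
 * assignments above; byte offsets are 0-based):
 *
 *	cdb[0] = MODE_SELECT (0x15)
 *	cdb[1] = parm        (PF/SP bits; the page 28 caller passes 0x11)
 *	cdb[4] = xfer_len    (parameter list length)
 *
 * All other CDB bytes remain zero because the reset job reinitializes
 * the command block between steps.
 */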
5661
5662/**
5663 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5664 * @ipr_cmd: ipr command struct
5665 *
5666 * This function sets up the SCSI bus attributes and sends
5667 * a Mode Select for Page 28 to activate them.
5668 *
5669 * Return value:
5670 * IPR_RC_JOB_RETURN
5671 **/
5672static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5673{
5674 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5675 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5676 int length;
5677
5678 ENTER;
5679 ipr_scsi_bus_speed_limit(ioa_cfg);
5680 ipr_check_term_power(ioa_cfg, mode_pages);
5681 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5682 length = mode_pages->hdr.length + 1;
5683 mode_pages->hdr.length = 0;
5684
5685 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5686 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5687 length);
5688
5689	ipr_cmd->job_step = ipr_setup_write_cache;
5690 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5691
5692 LEAVE;
5693 return IPR_RC_JOB_RETURN;
5694}
5695
5696/**
5697 * ipr_build_mode_sense - Builds a mode sense command
5698 * @ipr_cmd: ipr command struct
5699 * @res_handle: resource handle to send command to
5700 * @parm: Byte 2 of mode sense command
5701 * @dma_addr: DMA address of mode sense buffer
5702 * @xfer_len: Size of DMA buffer
5703 *
5704 * Return value:
5705 * none
5706 **/
5707static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5708 __be32 res_handle,
5709 u8 parm, u32 dma_addr, u8 xfer_len)
5710{
5711 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5712 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5713
5714 ioarcb->res_handle = res_handle;
5715 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5716 ioarcb->cmd_pkt.cdb[2] = parm;
5717 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5718 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5719
5720 ioadl->flags_and_data_len =
5721 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5722 ioadl->address = cpu_to_be32(dma_addr);
5723 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5724 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5725}
5726
5727/**
5728 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5729 * @ipr_cmd: ipr command struct
5730 *
5731 * This function handles the failure of an IOA bringup command.
5732 *
5733 * Return value:
5734 * IPR_RC_JOB_RETURN
5735 **/
5736static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5737{
5738 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5739 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5740
5741 dev_err(&ioa_cfg->pdev->dev,
5742 "0x%02X failed with IOASC: 0x%08X\n",
5743 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5744
5745 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5746 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5747 return IPR_RC_JOB_RETURN;
5748}
5749
5750/**
5751 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5752 * @ipr_cmd: ipr command struct
5753 *
5754 * This function handles the failure of a Mode Sense to the IOAFP.
5755 * Some adapters do not handle all mode pages.
5756 *
5757 * Return value:
5758 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5759 **/
5760static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5761{
5762 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5763
5764 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5765 ipr_cmd->job_step = ipr_setup_write_cache;
5766 return IPR_RC_JOB_CONTINUE;
5767 }
5768
5769 return ipr_reset_cmd_failed(ipr_cmd);
5770}
5771
5772/**
5773 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5774 * @ipr_cmd: ipr command struct
5775 *
5776 * This function sends a Page 28 mode sense to the IOA to
5777 * retrieve SCSI bus attributes.
5778 *
5779 * Return value:
5780 * IPR_RC_JOB_RETURN
5781 **/
5782static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5783{
5784 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5785
5786 ENTER;
5787 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5788 0x28, ioa_cfg->vpd_cbs_dma +
5789 offsetof(struct ipr_misc_cbs, mode_pages),
5790 sizeof(struct ipr_mode_pages));
5791
5792 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5793	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5794
5795 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5796
5797 LEAVE;
5798 return IPR_RC_JOB_RETURN;
5799}
5800
5801/**
5802 * ipr_init_res_table - Initialize the resource table
5803 * @ipr_cmd: ipr command struct
5804 *
5805 * This function looks through the existing resource table, comparing
5806 * it with the config table. This function will take care of old/new
5807 * devices and schedule adding/removing them from the mid-layer
5808 * as appropriate.
5809 *
5810 * Return value:
5811 * IPR_RC_JOB_CONTINUE
5812 **/
5813static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5814{
5815 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5816 struct ipr_resource_entry *res, *temp;
5817 struct ipr_config_table_entry *cfgte;
5818 int found, i;
5819 LIST_HEAD(old_res);
5820
5821 ENTER;
5822 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5823 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5824
5825 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5826 list_move_tail(&res->queue, &old_res);
5827
5828 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5829 cfgte = &ioa_cfg->cfg_table->dev[i];
5830 found = 0;
5831
5832 list_for_each_entry_safe(res, temp, &old_res, queue) {
5833 if (!memcmp(&res->cfgte.res_addr,
5834 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5835 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5836 found = 1;
5837 break;
5838 }
5839 }
5840
5841 if (!found) {
5842 if (list_empty(&ioa_cfg->free_res_q)) {
5843 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5844 break;
5845 }
5846
5847 found = 1;
5848 res = list_entry(ioa_cfg->free_res_q.next,
5849 struct ipr_resource_entry, queue);
5850 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5851 ipr_init_res_entry(res);
5852 res->add_to_ml = 1;
5853 }
5854
5855 if (found)
5856 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5857 }
5858
5859 list_for_each_entry_safe(res, temp, &old_res, queue) {
5860 if (res->sdev) {
5861 res->del_from_ml = 1;
5862			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5863 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5864 } else {
5865 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5866 }
5867 }
5868
5869 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5870
5871 LEAVE;
5872 return IPR_RC_JOB_CONTINUE;
5873}
5874
5875/**
5876 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5877 * @ipr_cmd: ipr command struct
5878 *
5879 * This function sends a Query IOA Configuration command
5880 * to the adapter to retrieve the IOA configuration table.
5881 *
5882 * Return value:
5883 * IPR_RC_JOB_RETURN
5884 **/
5885static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5886{
5887 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5888 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5889 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5890 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5891
5892 ENTER;
5893 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5894 ucode_vpd->major_release, ucode_vpd->card_type,
5895 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5896 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5897 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5898
5899 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5900 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5901 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5902
5903 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5904 ioarcb->read_data_transfer_length =
5905 cpu_to_be32(sizeof(struct ipr_config_table));
5906
5907 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5908 ioadl->flags_and_data_len =
5909 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5910
5911 ipr_cmd->job_step = ipr_init_res_table;
5912
5913 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5914
5915 LEAVE;
5916 return IPR_RC_JOB_RETURN;
5917}
5918
5919/**
5920 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5921 * @ipr_cmd: ipr command struct
 * @flags: inquiry CDB flags byte (e.g. EVPD)
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
5922 *
5923 * This utility function sends an inquiry to the adapter.
5924 *
5925 * Return value:
5926 * none
5927 **/
5928static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5929 u32 dma_addr, u8 xfer_len)
5930{
5931 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5932 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5933
5934 ENTER;
5935 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5936 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5937
5938 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5939 ioarcb->cmd_pkt.cdb[1] = flags;
5940 ioarcb->cmd_pkt.cdb[2] = page;
5941 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5942
5943 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5944 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5945
5946 ioadl->address = cpu_to_be32(dma_addr);
5947 ioadl->flags_and_data_len =
5948 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5949
5950 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5951 LEAVE;
5952}
5953
5954/**
5955 * ipr_inquiry_page_supported - Is the given inquiry page supported
5956 * @page0: inquiry page 0 buffer
5957 * @page: page code.
5958 *
5959 * This function determines if the specified inquiry page is supported.
5960 *
5961 * Return value:
5962 * 1 if page is supported / 0 if not
5963 **/
5964static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5965{
5966 int i;
5967
5968 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5969 if (page0->page[i] == page)
5970 return 1;
5971
5972 return 0;
5973}
5974
5975/**
5976 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5977 * @ipr_cmd: ipr command struct
5978 *
5979 * This function sends a Page 3 inquiry to the adapter
5980 * to retrieve software VPD information.
5981 *
5982 * Return value:
5983 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5984 **/
5985static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5986{
5987 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5988 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5989
5990 ENTER;
5991
5992 if (!ipr_inquiry_page_supported(page0, 1))
5993 ioa_cfg->cache_state = CACHE_NONE;
5994
5995 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5996
5997 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5998 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5999 sizeof(struct ipr_inquiry_page3));
6000
6001 LEAVE;
6002 return IPR_RC_JOB_RETURN;
6003}
6004
6005/**
6006 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6007 * @ipr_cmd: ipr command struct
6008 *
6009 * This function sends a Page 0 inquiry to the adapter
6010 * to retrieve supported inquiry pages.
6011 *
6012 * Return value:
6013 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6014 **/
6015static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6016{
6017 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6018 char type[5];
6019
6020 ENTER;
6021
6022 /* Grab the type out of the VPD and store it away */
6023 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6024 type[4] = '\0';
6025 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6026
6027	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6028
6029 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6030 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6031 sizeof(struct ipr_inquiry_page0));
6032
6033 LEAVE;
6034 return IPR_RC_JOB_RETURN;
6035}
6036
6037/**
6038 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6039 * @ipr_cmd: ipr command struct
6040 *
6041 * This function sends a standard inquiry to the adapter.
6042 *
6043 * Return value:
6044 * IPR_RC_JOB_RETURN
6045 **/
6046static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6047{
6048 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6049
6050 ENTER;
6051	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6052
6053 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6054 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6055 sizeof(struct ipr_ioa_vpd));
6056
6057 LEAVE;
6058 return IPR_RC_JOB_RETURN;
6059}
6060
6061/**
6062 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6063 * @ipr_cmd: ipr command struct
6064 *
6065 * This function sends an Identify Host Request Response Queue
6066 * command to establish the HRRQ with the adapter.
6067 *
6068 * Return value:
6069 * IPR_RC_JOB_RETURN
6070 **/
6071static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6072{
6073 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6074 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6075
6076 ENTER;
6077 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6078
6079 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6080 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6081
6082 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6083 ioarcb->cmd_pkt.cdb[2] =
6084 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6085 ioarcb->cmd_pkt.cdb[3] =
6086 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6087 ioarcb->cmd_pkt.cdb[4] =
6088 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6089 ioarcb->cmd_pkt.cdb[5] =
6090 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
6091 ioarcb->cmd_pkt.cdb[7] =
6092 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6093 ioarcb->cmd_pkt.cdb[8] =
6094 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6095
6096 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6097
6098 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6099
6100 LEAVE;
6101 return IPR_RC_JOB_RETURN;
6102}
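
/*
 * Sketch (hypothetical helper, not part of ipr.c): cdb[2..5] above
 * carry the host RRQ DMA address in big-endian byte order and
 * cdb[7..8] the queue size. The shift-and-mask packing is equivalent to:
 */
static inline void example_pack_be32(u8 *p, u32 v)
{
	p[0] = (v >> 24) & 0xff;	/* most significant byte first */
	p[1] = (v >> 16) & 0xff;
	p[2] = (v >> 8) & 0xff;
	p[3] = v & 0xff;
}
/* e.g. example_pack_be32(&ioarcb->cmd_pkt.cdb[2], (u32) ioa_cfg->host_rrq_dma); */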
6103
6104/**
6105 * ipr_reset_timer_done - Adapter reset timer function
6106 * @ipr_cmd: ipr command struct
6107 *
6108 * Description: This function is used in adapter reset processing
6109 * for timing events. If the reset_cmd pointer in the IOA
6110 * config struct does not point to this command, we are doing
6111 * nested resets and fail_all_ops will take care of freeing the
6112 * command block.
6113 *
6114 * Return value:
6115 * none
6116 **/
6117static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6118{
6119 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6120 unsigned long lock_flags = 0;
6121
6122 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6123
6124 if (ioa_cfg->reset_cmd == ipr_cmd) {
6125 list_del(&ipr_cmd->queue);
6126 ipr_cmd->done(ipr_cmd);
6127 }
6128
6129 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6130}
6131
6132/**
6133 * ipr_reset_start_timer - Start a timer for adapter reset job
6134 * @ipr_cmd: ipr command struct
6135 * @timeout: timeout value
6136 *
6137 * Description: This function is used in adapter reset processing
6138 * for timing events. If the reset_cmd pointer in the IOA
6139 * config struct does not point to this command, we are doing
6140 * nested resets and fail_all_ops will take care of freeing the
6141 * command block.
6142 *
6143 * Return value:
6144 * none
6145 **/
6146static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6147 unsigned long timeout)
6148{
6149 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6150 ipr_cmd->done = ipr_reset_ioa_job;
6151
6152 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6153 ipr_cmd->timer.expires = jiffies + timeout;
6154 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6155 add_timer(&ipr_cmd->timer);
6156}
6157
6158/**
6159 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6160 * @ioa_cfg: ioa cfg struct
6161 *
6162 * Return value:
6163 * nothing
6164 **/
6165static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6166{
6167 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6168
6169 /* Initialize Host RRQ pointers */
6170 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6171 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6172 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6173 ioa_cfg->toggle_bit = 1;
6174
6175 /* Zero out config table */
6176 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6177}
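
/*
 * Note on the toggle bit (the consumer, ipr_isr, is outside this
 * excerpt): the adapter writes each host RRQ entry with its low bit
 * equal to the current pass's toggle_bit. Assuming that layout, the
 * host's advance step looks like:
 *
 *	if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
 *		ioa_cfg->hrrq_curr++;
 *	} else {
 *		ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
 *		ioa_cfg->toggle_bit ^= 1u;
 *	}
 *
 * so stale entries from the previous pass no longer match toggle_bit
 * and cannot be consumed twice after a wrap.
 */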
6178
6179/**
6180 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6181 * @ipr_cmd: ipr command struct
6182 *
6183 * This function reinitializes some control blocks and
6184 * enables destructive diagnostics on the adapter.
6185 *
6186 * Return value:
6187 * IPR_RC_JOB_RETURN
6188 **/
6189static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6190{
6191 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6192 volatile u32 int_reg;
6193
6194 ENTER;
6195 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6196 ipr_init_ioa_mem(ioa_cfg);
6197
6198 ioa_cfg->allow_interrupts = 1;
6199 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6200
6201 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6202 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6203 ioa_cfg->regs.clr_interrupt_mask_reg);
6204 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6205 return IPR_RC_JOB_CONTINUE;
6206 }
6207
6208 /* Enable destructive diagnostics on IOA */
6209	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6210
6211 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6212 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6213
6214 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6215
6216 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6217	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6218 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6219 ipr_cmd->done = ipr_reset_ioa_job;
6220 add_timer(&ipr_cmd->timer);
6221 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6222
6223 LEAVE;
6224 return IPR_RC_JOB_RETURN;
6225}
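
/*
 * Note: ipr_reset_enable_ioa has two exits. If the sense interrupt
 * register already shows IPR_PCII_IOA_TRANS_TO_OPER, the IOA is
 * operational and the job continues synchronously; otherwise the
 * doorbell is written and an ipr_oper_timeout timer of
 * ioa_cfg->transop_timeout seconds bounds the wait for the
 * transition-to-operational interrupt to re-drive the reset job.
 */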
6226
6227/**
6228 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6229 * @ipr_cmd: ipr command struct
6230 *
6231 * This function is invoked when an adapter dump has run out
6232 * of processing time.
6233 *
6234 * Return value:
6235 * IPR_RC_JOB_CONTINUE
6236 **/
6237static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6238{
6239 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6240
6241 if (ioa_cfg->sdt_state == GET_DUMP)
6242 ioa_cfg->sdt_state = ABORT_DUMP;
6243
6244 ipr_cmd->job_step = ipr_reset_alert;
6245
6246 return IPR_RC_JOB_CONTINUE;
6247}
6248
6249/**
6250 * ipr_unit_check_no_data - Log a unit check/no data error log
6251 * @ioa_cfg: ioa config struct
6252 *
6253 * Logs an error indicating the adapter unit checked, but for some
6254 * reason, we were unable to fetch the unit check buffer.
6255 *
6256 * Return value:
6257 * nothing
6258 **/
6259static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6260{
6261 ioa_cfg->errors_logged++;
6262 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6263}
6264
6265/**
6266 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6267 * @ioa_cfg: ioa config struct
6268 *
6269 * Fetches the unit check buffer from the adapter by clocking the data
6270 * through the mailbox register.
6271 *
6272 * Return value:
6273 * nothing
6274 **/
6275static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6276{
6277 unsigned long mailbox;
6278 struct ipr_hostrcb *hostrcb;
6279 struct ipr_uc_sdt sdt;
6280 int rc, length;
6281
6282 mailbox = readl(ioa_cfg->ioa_mailbox);
6283
6284 if (!ipr_sdt_is_fmt2(mailbox)) {
6285 ipr_unit_check_no_data(ioa_cfg);
6286 return;
6287 }
6288
6289 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6290 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6291 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6292
6293 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6294 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6295 ipr_unit_check_no_data(ioa_cfg);
6296 return;
6297 }
6298
6299 /* Find length of the first sdt entry (UC buffer) */
6300 length = (be32_to_cpu(sdt.entry[0].end_offset) -
6301 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6302
6303 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6304 struct ipr_hostrcb, queue);
6305 list_del(&hostrcb->queue);
6306 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6307
6308 rc = ipr_get_ldump_data_section(ioa_cfg,
6309 be32_to_cpu(sdt.entry[0].bar_str_offset),
6310 (__be32 *)&hostrcb->hcam,
6311 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6312
6313 if (!rc)
6314 ipr_handle_log_data(ioa_cfg, hostrcb);
6315 else
6316 ipr_unit_check_no_data(ioa_cfg);
6317
6318 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6319}
6320
6321/**
6322 * ipr_reset_restore_cfg_space - Restore PCI config space.
6323 * @ipr_cmd: ipr command struct
6324 *
6325 * Description: This function restores the saved PCI config space of
6326 * the adapter, fails all outstanding ops back to the callers, and
6327 * fetches the dump/unit check if applicable to this reset.
6328 *
6329 * Return value:
6330 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6331 **/
6332static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6333{
6334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6335 int rc;
6336
6337 ENTER;
6338 rc = pci_restore_state(ioa_cfg->pdev);
6339
6340 if (rc != PCIBIOS_SUCCESSFUL) {
6341 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6342 return IPR_RC_JOB_CONTINUE;
6343 }
6344
6345 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6346 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6347 return IPR_RC_JOB_CONTINUE;
6348 }
6349
6350 ipr_fail_all_ops(ioa_cfg);
6351
6352 if (ioa_cfg->ioa_unit_checked) {
6353 ioa_cfg->ioa_unit_checked = 0;
6354 ipr_get_unit_check_buffer(ioa_cfg);
6355 ipr_cmd->job_step = ipr_reset_alert;
6356 ipr_reset_start_timer(ipr_cmd, 0);
6357 return IPR_RC_JOB_RETURN;
6358 }
6359
6360 if (ioa_cfg->in_ioa_bringdown) {
6361 ipr_cmd->job_step = ipr_ioa_bringdown_done;
6362 } else {
6363 ipr_cmd->job_step = ipr_reset_enable_ioa;
6364
6365 if (GET_DUMP == ioa_cfg->sdt_state) {
6366 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6367 ipr_cmd->job_step = ipr_reset_wait_for_dump;
6368 schedule_work(&ioa_cfg->work_q);
6369 return IPR_RC_JOB_RETURN;
6370 }
6371 }
6372
6373	LEAVE;
6374 return IPR_RC_JOB_CONTINUE;
6375}
6376
6377/**
6378 * ipr_reset_bist_done - BIST has completed on the adapter.
6379 * @ipr_cmd: ipr command struct
6380 *
6381 * Description: Unblock config space and resume the reset process.
6382 *
6383 * Return value:
6384 * IPR_RC_JOB_CONTINUE
6385 **/
6386static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6387{
6388 ENTER;
6389 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6390 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6391 LEAVE;
6392 return IPR_RC_JOB_CONTINUE;
6393}
6394
6395/**
6396 * ipr_reset_start_bist - Run BIST on the adapter.
6397 * @ipr_cmd: ipr command struct
6398 *
6399 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6400 *
6401 * Return value:
6402 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6403 **/
6404static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6405{
6406 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6407 int rc;
6408
6409 ENTER;
6410	pci_block_user_cfg_access(ioa_cfg->pdev);
6411 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6412
6413 if (rc != PCIBIOS_SUCCESSFUL) {
6414		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6415 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6416 rc = IPR_RC_JOB_CONTINUE;
6417 } else {
6418		ipr_cmd->job_step = ipr_reset_bist_done;
6419 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6420 rc = IPR_RC_JOB_RETURN;
6421 }
6422
6423 LEAVE;
6424 return rc;
6425}
6426
6427/**
6428 * ipr_reset_allowed - Query whether or not IOA can be reset
6429 * @ioa_cfg: ioa config struct
6430 *
6431 * Return value:
6432 * 0 if reset not allowed / non-zero if reset is allowed
6433 **/
6434static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6435{
6436 volatile u32 temp_reg;
6437
6438 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6439 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6440}
6441
6442/**
6443 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6444 * @ipr_cmd: ipr command struct
6445 *
6446 * Description: This function waits for adapter permission to run BIST,
6447 * then runs BIST. If the adapter does not give permission after a
6448 * reasonable time, we will reset the adapter anyway. The impact of
6449 * resetting the adapter without warning the adapter is the risk of
6450 * losing the persistent error log on the adapter. If the adapter is
6451 * reset while it is writing to the flash on the adapter, the flash
6452 * segment will have bad ECC and be zeroed.
6453 *
6454 * Return value:
6455 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6456 **/
6457static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6458{
6459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6460 int rc = IPR_RC_JOB_RETURN;
6461
6462 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6463 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6464 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6465 } else {
6466 ipr_cmd->job_step = ipr_reset_start_bist;
6467 rc = IPR_RC_JOB_CONTINUE;
6468 }
6469
6470 return rc;
6471}
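
/*
 * Note: this wait is a bounded poll. ipr_cmd->u.time_left is seeded
 * with IPR_WAIT_FOR_RESET_TIMEOUT in ipr_reset_alert below and
 * decremented by IPR_CHECK_FOR_RESET_TIMEOUT per timer pass, so BIST is
 * eventually forced even if the adapter never clears
 * IPR_PCII_CRITICAL_OPERATION (at the documented risk to the flash).
 */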
6472
6473/**
6474 * ipr_reset_alert - Alert the adapter of a pending reset
6475 * @ipr_cmd: ipr command struct
6476 *
6477 * Description: This function alerts the adapter that it will be reset.
6478 * If memory space is not currently enabled, proceed directly
6479 * to running BIST on the adapter. The timer must always be started
6480 * so we guarantee we do not run BIST from ipr_isr.
6481 *
6482 * Return value:
6483 * IPR_RC_JOB_RETURN
6484 **/
6485static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6486{
6487 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6488 u16 cmd_reg;
6489 int rc;
6490
6491 ENTER;
6492 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6493
6494 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6495 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6496 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6497 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6498 } else {
6499 ipr_cmd->job_step = ipr_reset_start_bist;
6500 }
6501
6502 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6503 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6504
6505 LEAVE;
6506 return IPR_RC_JOB_RETURN;
6507}
6508
6509/**
6510 * ipr_reset_ucode_download_done - Microcode download completion
6511 * @ipr_cmd: ipr command struct
6512 *
6513 * Description: This function unmaps the microcode download buffer.
6514 *
6515 * Return value:
6516 * IPR_RC_JOB_CONTINUE
6517 **/
6518static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6519{
6520 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6521 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6522
6523 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6524 sglist->num_sg, DMA_TO_DEVICE);
6525
6526 ipr_cmd->job_step = ipr_reset_alert;
6527 return IPR_RC_JOB_CONTINUE;
6528}
6529
6530/**
6531 * ipr_reset_ucode_download - Download microcode to the adapter
6532 * @ipr_cmd: ipr command struct
6533 *
6534 * Description: This function checks to see if there is microcode
6535 * to download to the adapter. If there is, a download is performed.
6536 *
6537 * Return value:
6538 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6539 **/
6540static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6541{
6542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6543 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6544
6545 ENTER;
6546 ipr_cmd->job_step = ipr_reset_alert;
6547
6548 if (!sglist)
6549 return IPR_RC_JOB_CONTINUE;
6550
6551 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6552 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6553 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6554 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6555 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6556 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6557 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6558
6559	ipr_build_ucode_ioadl(ipr_cmd, sglist);
6560 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6561
6562 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6563 IPR_WRITE_BUFFER_TIMEOUT);
6564
6565 LEAVE;
6566 return IPR_RC_JOB_RETURN;
6567}
6568
6569/**
6570 * ipr_reset_shutdown_ioa - Shutdown the adapter
6571 * @ipr_cmd: ipr command struct
6572 *
6573 * Description: This function issues an adapter shutdown of the
6574 * specified type to the specified adapter as part of the
6575 * adapter reset job.
6576 *
6577 * Return value:
6578 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6579 **/
6580static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6581{
6582 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6583 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6584 unsigned long timeout;
6585 int rc = IPR_RC_JOB_CONTINUE;
6586
6587 ENTER;
6588 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6589 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6590 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6591 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6592 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6593
6594 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6595 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6596 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6597 timeout = IPR_INTERNAL_TIMEOUT;
6598 else
6599 timeout = IPR_SHUTDOWN_TIMEOUT;
6600
6601 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6602
6603 rc = IPR_RC_JOB_RETURN;
6604 ipr_cmd->job_step = ipr_reset_ucode_download;
6605 } else
6606 ipr_cmd->job_step = ipr_reset_alert;
6607
6608 LEAVE;
6609 return rc;
6610}
6611
6612/**
6613 * ipr_reset_ioa_job - Adapter reset job
6614 * @ipr_cmd: ipr command struct
6615 *
6616 * Description: This function is the job router for the adapter reset job.
6617 *
6618 * Return value:
6619 * none
6620 **/
6621static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6622{
6623 u32 rc, ioasc;
6624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6625
6626 do {
6627 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6628
6629 if (ioa_cfg->reset_cmd != ipr_cmd) {
6630 /*
6631 * We are doing nested adapter resets and this is
6632 * not the current reset job.
6633 */
6634 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6635 return;
6636 }
6637
6638 if (IPR_IOASC_SENSE_KEY(ioasc)) {
6639 rc = ipr_cmd->job_step_failed(ipr_cmd);
6640 if (rc == IPR_RC_JOB_RETURN)
6641 return;
6642 }
6643
6644 ipr_reinit_ipr_cmnd(ipr_cmd);
6645		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6646 rc = ipr_cmd->job_step(ipr_cmd);
6647 } while(rc == IPR_RC_JOB_CONTINUE);
6648}
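
/*
 * Sketch of the job-step contract the loop above implements
 * (hypothetical step, for illustration only): a step returns
 * IPR_RC_JOB_CONTINUE to have the router invoke the next job_step
 * synchronously, or IPR_RC_JOB_RETURN after arranging an asynchronous
 * re-entry (ipr_do_req with done == ipr_reset_ioa_job, or a timer).
 */
static int example_job_step(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = ipr_reset_enable_ioa;	/* next state */
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_INTERNAL_TIMEOUT);
	return IPR_RC_JOB_RETURN;	/* completion re-enters the router */
}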
6649
6650/**
6651 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6652 * @ioa_cfg: ioa config struct
6653 * @job_step: first job step of reset job
6654 * @shutdown_type: shutdown type
6655 *
6656 * Description: This function will initiate the reset of the given adapter
6657 * starting at the selected job step.
6658 * If the caller needs to wait on the completion of the reset,
6659 * the caller must sleep on the reset_wait_q.
6660 *
6661 * Return value:
6662 * none
6663 **/
6664static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6665 int (*job_step) (struct ipr_cmnd *),
6666 enum ipr_shutdown_type shutdown_type)
6667{
6668 struct ipr_cmnd *ipr_cmd;
6669
6670 ioa_cfg->in_reset_reload = 1;
6671 ioa_cfg->allow_cmds = 0;
6672 scsi_block_requests(ioa_cfg->host);
6673
6674 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6675 ioa_cfg->reset_cmd = ipr_cmd;
6676 ipr_cmd->job_step = job_step;
6677 ipr_cmd->u.shutdown_type = shutdown_type;
6678
6679 ipr_reset_ioa_job(ipr_cmd);
6680}
6681
6682/**
6683 * ipr_initiate_ioa_reset - Initiate an adapter reset
6684 * @ioa_cfg: ioa config struct
6685 * @shutdown_type: shutdown type
6686 *
6687 * Description: This function will initiate the reset of the given adapter.
6688 * If the caller needs to wait on the completion of the reset,
6689 * the caller must sleep on the reset_wait_q.
6690 *
6691 * Return value:
6692 * none
6693 **/
6694static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6695 enum ipr_shutdown_type shutdown_type)
6696{
6697 if (ioa_cfg->ioa_is_dead)
6698 return;
6699
6700 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6701 ioa_cfg->sdt_state = ABORT_DUMP;
6702
6703 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6704 dev_err(&ioa_cfg->pdev->dev,
6705 "IOA taken offline - error recovery failed\n");
6706
6707 ioa_cfg->reset_retries = 0;
6708 ioa_cfg->ioa_is_dead = 1;
6709
6710 if (ioa_cfg->in_ioa_bringdown) {
6711 ioa_cfg->reset_cmd = NULL;
6712 ioa_cfg->in_reset_reload = 0;
6713 ipr_fail_all_ops(ioa_cfg);
6714 wake_up_all(&ioa_cfg->reset_wait_q);
6715
6716 spin_unlock_irq(ioa_cfg->host->host_lock);
6717 scsi_unblock_requests(ioa_cfg->host);
6718 spin_lock_irq(ioa_cfg->host->host_lock);
6719 return;
6720 } else {
6721 ioa_cfg->in_ioa_bringdown = 1;
6722 shutdown_type = IPR_SHUTDOWN_NONE;
6723 }
6724 }
6725
6726 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6727 shutdown_type);
6728}
6729
6730/**
6731 * ipr_reset_freeze - Hold off all I/O activity
6732 * @ipr_cmd: ipr command struct
6733 *
6734 * Description: If the PCI slot is frozen, hold off all I/O
6735 * activity; then, as soon as the slot is available again,
6736 * initiate an adapter reset.
6737 */
6738static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6739{
6740 /* Disallow new interrupts, avoid loop */
6741 ipr_cmd->ioa_cfg->allow_interrupts = 0;
6742 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6743 ipr_cmd->done = ipr_reset_ioa_job;
6744 return IPR_RC_JOB_RETURN;
6745}
6746
6747/**
6748 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6749 * @pdev: PCI device struct
6750 *
6751 * Description: This routine is called to tell us that the PCI bus
6752 * is down. Can't do anything here, except put the device driver
6753 * into a holding pattern, waiting for the PCI bus to come back.
6754 */
6755static void ipr_pci_frozen(struct pci_dev *pdev)
6756{
6757 unsigned long flags = 0;
6758 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6759
6760 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6761 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6762 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6763}
6764
6765/**
6766 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6767 * @pdev: PCI device struct
6768 *
6769 * Description: This routine is called by the pci error recovery
6770 * code after the PCI slot has been reset, just before we
6771 * should resume normal operations.
6772 */
6773static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6774{
6775 unsigned long flags = 0;
6776 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6777
6778 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6779 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6780 IPR_SHUTDOWN_NONE);
6781 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6782 return PCI_ERS_RESULT_RECOVERED;
6783}
6784
6785/**
6786 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6787 * @pdev: PCI device struct
6788 *
6789 * Description: This routine is called when the PCI bus has
6790 * permanently failed.
6791 */
6792static void ipr_pci_perm_failure(struct pci_dev *pdev)
6793{
6794 unsigned long flags = 0;
6795 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6796
6797 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6798 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6799 ioa_cfg->sdt_state = ABORT_DUMP;
6800 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6801 ioa_cfg->in_ioa_bringdown = 1;
6802 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6803 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6804}
6805
6806/**
6807 * ipr_pci_error_detected - Called when a PCI error is detected.
6808 * @pdev: PCI device struct
6809 * @state: PCI channel state
6810 *
6811 * Description: Called when a PCI error is detected.
6812 *
6813 * Return value:
6814 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6815 */
6816static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6817 pci_channel_state_t state)
6818{
6819 switch (state) {
6820 case pci_channel_io_frozen:
6821 ipr_pci_frozen(pdev);
6822 return PCI_ERS_RESULT_NEED_RESET;
6823 case pci_channel_io_perm_failure:
6824 ipr_pci_perm_failure(pdev);
6825 return PCI_ERS_RESULT_DISCONNECT;
6827 default:
6828 break;
6829 }
6830 return PCI_ERS_RESULT_NEED_RESET;
6831}
6832
6833/**
6834 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6835 * @ioa_cfg: ioa cfg struct
6836 *
6837 * Description: This is the second phase of adapter initialization.
6838 * This function takes care of initializing the adapter to the point
6839 * where it can accept new commands.
6840 *
6841 * Return value:
6842 * 	0 on success / -EIO on failure
6843 **/
6844static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
6845{
6846 int rc = 0;
6847 unsigned long host_lock_flags = 0;
6848
6849 ENTER;
6850 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6851 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6852 if (ioa_cfg->needs_hard_reset) {
6853 ioa_cfg->needs_hard_reset = 0;
6854 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6855 } else
6856 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
6857 IPR_SHUTDOWN_NONE);
6858
6859 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6860 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6861 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6862
6863 if (ioa_cfg->ioa_is_dead) {
6864 rc = -EIO;
6865 } else if (ipr_invalid_adapter(ioa_cfg)) {
6866 if (!ipr_testmode)
6867 rc = -EIO;
6868
6869 dev_err(&ioa_cfg->pdev->dev,
6870 "Adapter not supported in this hardware configuration.\n");
6871 }
6872
6873 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6874
6875 LEAVE;
6876 return rc;
6877}
6878
6879/**
6880 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6881 * @ioa_cfg: ioa config struct
6882 *
6883 * Return value:
6884 * none
6885 **/
6886static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6887{
6888 int i;
6889
6890 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6891 if (ioa_cfg->ipr_cmnd_list[i])
6892 pci_pool_free(ioa_cfg->ipr_cmd_pool,
6893 ioa_cfg->ipr_cmnd_list[i],
6894 ioa_cfg->ipr_cmnd_list_dma[i]);
6895
6896 ioa_cfg->ipr_cmnd_list[i] = NULL;
6897 }
6898
6899 if (ioa_cfg->ipr_cmd_pool)
6900 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6901
6902 ioa_cfg->ipr_cmd_pool = NULL;
6903}
6904
6905/**
6906 * ipr_free_mem - Frees memory allocated for an adapter
6907 * @ioa_cfg: ioa cfg struct
6908 *
6909 * Return value:
6910 * nothing
6911 **/
6912static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6913{
6914 int i;
6915
6916 kfree(ioa_cfg->res_entries);
6917 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6918 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6919 ipr_free_cmd_blks(ioa_cfg);
6920 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6921 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6922 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6923 ioa_cfg->cfg_table,
6924 ioa_cfg->cfg_table_dma);
6925
6926 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6927 pci_free_consistent(ioa_cfg->pdev,
6928 sizeof(struct ipr_hostrcb),
6929 ioa_cfg->hostrcb[i],
6930 ioa_cfg->hostrcb_dma[i]);
6931 }
6932
6933 ipr_free_dump(ioa_cfg);
6934 kfree(ioa_cfg->trace);
6935}
6936
6937/**
6938 * ipr_free_all_resources - Free all allocated resources for an adapter.
6939 * @ioa_cfg: ioa config struct
6940 *
6941 * This function frees all allocated resources for the
6942 * specified adapter.
6943 *
6944 * Return value:
6945 * none
6946 **/
6947static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6948{
6949 struct pci_dev *pdev = ioa_cfg->pdev;
6950
6951 ENTER;
6952 free_irq(pdev->irq, ioa_cfg);
6953 iounmap(ioa_cfg->hdw_dma_regs);
6954 pci_release_regions(pdev);
6955 ipr_free_mem(ioa_cfg);
6956 scsi_host_put(ioa_cfg->host);
6957 pci_disable_device(pdev);
6958 LEAVE;
6959}
6960
6961/**
6962 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6963 * @ioa_cfg: ioa config struct
6964 *
6965 * Return value:
6966 * 0 on success / -ENOMEM on allocation failure
6967 **/
6968static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6969{
6970 struct ipr_cmnd *ipr_cmd;
6971 struct ipr_ioarcb *ioarcb;
6972 dma_addr_t dma_addr;
6973 int i;
6974
6975 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6976 sizeof(struct ipr_cmnd), 8, 0);
6977
6978 if (!ioa_cfg->ipr_cmd_pool)
6979 return -ENOMEM;
6980
6981 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6982		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
6983
6984 if (!ipr_cmd) {
6985 ipr_free_cmd_blks(ioa_cfg);
6986 return -ENOMEM;
6987 }
6988
6989 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6990 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6991 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6992
6993 ioarcb = &ipr_cmd->ioarcb;
6994 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6995 ioarcb->host_response_handle = cpu_to_be32(i << 2);
6996 ioarcb->write_ioadl_addr =
6997 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6998 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6999 ioarcb->ioasa_host_pci_addr =
7000 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7001 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7002 ipr_cmd->cmd_index = i;
7003 ipr_cmd->ioa_cfg = ioa_cfg;
7004 ipr_cmd->sense_buffer_dma = dma_addr +
7005 offsetof(struct ipr_cmnd, sense_buffer);
7006
7007 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7008 }
7009
7010 return 0;
7011}
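
/*
 * Note: each struct ipr_cmnd comes from a single PCI pool entry, so
 * the device-visible addresses of its embedded IOADL, IOASA, and sense
 * buffer are all derived from the one dma_addr with offsetof(), e.g.:
 *
 *	ioasa_pci = dma_addr + offsetof(struct ipr_cmnd, ioasa);
 *
 * keeping the command block, its descriptors, and its status area in a
 * single DMA-coherent allocation per command.
 */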
7012
7013/**
7014 * ipr_alloc_mem - Allocate memory for an adapter
7015 * @ioa_cfg: ioa config struct
7016 *
7017 * Return value:
7018 * 0 on success / non-zero for error
7019 **/
7020static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7021{
7022 struct pci_dev *pdev = ioa_cfg->pdev;
7023 int i, rc = -ENOMEM;
7024
7025 ENTER;
7026	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7027 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7028
7029 if (!ioa_cfg->res_entries)
7030 goto out;
7031
7032 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7033 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7034
7035 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7036 sizeof(struct ipr_misc_cbs),
7037 &ioa_cfg->vpd_cbs_dma);
7038
7039 if (!ioa_cfg->vpd_cbs)
7040 goto out_free_res_entries;
7041
7042 if (ipr_alloc_cmd_blks(ioa_cfg))
7043 goto out_free_vpd_cbs;
7044
7045 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7046 sizeof(u32) * IPR_NUM_CMD_BLKS,
7047 &ioa_cfg->host_rrq_dma);
7048
7049 if (!ioa_cfg->host_rrq)
7050 goto out_ipr_free_cmd_blocks;
7051
7052 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7053 sizeof(struct ipr_config_table),
7054 &ioa_cfg->cfg_table_dma);
7055
7056 if (!ioa_cfg->cfg_table)
7057 goto out_free_host_rrq;
7058
7059 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7060 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7061 sizeof(struct ipr_hostrcb),
7062 &ioa_cfg->hostrcb_dma[i]);
7063
7064 if (!ioa_cfg->hostrcb[i])
7065 goto out_free_hostrcb_dma;
7066
7067 ioa_cfg->hostrcb[i]->hostrcb_dma =
7068 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7069		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7070 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7071 }
7072
0bc42e35 7073 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
7074 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7075
7076 if (!ioa_cfg->trace)
7077 goto out_free_hostrcb_dma;
7078
1da177e4
LT
7079 rc = 0;
7080out:
7081 LEAVE;
7082 return rc;
7083
7084out_free_hostrcb_dma:
7085 while (i-- > 0) {
7086 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7087 ioa_cfg->hostrcb[i],
7088 ioa_cfg->hostrcb_dma[i]);
7089 }
7090 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7091 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7092out_free_host_rrq:
7093 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7094 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7095out_ipr_free_cmd_blocks:
7096 ipr_free_cmd_blks(ioa_cfg);
7097out_free_vpd_cbs:
7098 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7099 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7100out_free_res_entries:
7101 kfree(ioa_cfg->res_entries);
7102 goto out;
7103}
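
/*
 * Editor's note: the error path above is the usual kernel goto-unwind
 * idiom -- resources are released in strict reverse order of allocation,
 * and a failure partway through the hostrcb loop relies on
 * "while (i-- > 0)" to free only the entries that were actually
 * allocated.  Skeleton of the pattern, with illustrative names:
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	return 0;
 * out_free_a:
 *	free_a(a);
 * out:
 *	return -ENOMEM;
 */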

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
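
/*
 * Editor's note: the block above converts the per-chip register offsets
 * into absolute __iomem pointers once at init time, so the rest of the
 * driver can use them directly, e.g. (same pattern as the hard-reset
 * check in ipr_probe_ioa() below):
 *
 *	volatile u32 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 *	if (!(mask & IPR_PCII_HRRQ_UPDATED))
 *		ioa_cfg->needs_hard_reset = 1;
 */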

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;	/* don't report probe success for an unsupported chip */
		goto out_scsi_host_put;
	}

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
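
/*
 * Editor's note: a minimal sketch of the caller-side wait described in the
 * comment above, following the same pattern __ipr_remove() uses below:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */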

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
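
/*
 * Editor's note: the positional fields in each entry above follow
 * struct pci_device_id: { vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data }.  The final value is the driver_data flags
 * word tested in ipr_probe_ioa(), so for example:
 *
 *	{ PCI_VENDOR_ID_IBM,			vendor
 *	  PCI_DEVICE_ID_IBM_OBSIDIAN_E,		device
 *	  PCI_VENDOR_ID_IBM,			subvendor
 *	  IPR_SUBS_DEV_ID_57B7,			subdevice
 *	  0, 0,					class, class_mask (match any)
 *	  IPR_USE_LONG_TRANSOP_TIMEOUT },	driver_data
 */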

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);