]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/scsi/ipr.c
[SCSI] ipr: Remove debug trace points from dump code
[mirror_ubuntu-zesty-kernel.git] / drivers / scsi / ipr.c
CommitLineData
1da177e4
LT
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
1da177e4
LT
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
35a39691 73#include <linux/libata.h>
1da177e4
LT
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/processor.h>
77#include <scsi/scsi.h>
78#include <scsi/scsi_host.h>
79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h>
1da177e4
LT
82#include "ipr.h"
83
84/*
85 * Global Data
86 */
87static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
88static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89static unsigned int ipr_max_speed = 1;
90static int ipr_testmode = 0;
91static unsigned int ipr_fastfail = 0;
92static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
62275040 93static unsigned int ipr_enable_cache = 1;
d3c74871 94static unsigned int ipr_debug = 0;
32d29776 95static int ipr_auto_create = 1;
1da177e4
LT
96static DEFINE_SPINLOCK(ipr_driver_lock);
97
98/* This table describes the differences between DMA controller chips */
99static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
60e7486b 100 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
1da177e4
LT
101 .mailbox = 0x0042C,
102 .cache_line_size = 0x20,
103 {
104 .set_interrupt_mask_reg = 0x0022C,
105 .clr_interrupt_mask_reg = 0x00230,
106 .sense_interrupt_mask_reg = 0x0022C,
107 .clr_interrupt_reg = 0x00228,
108 .sense_interrupt_reg = 0x00224,
109 .ioarrin_reg = 0x00404,
110 .sense_uproc_interrupt_reg = 0x00214,
111 .set_uproc_interrupt_reg = 0x00214,
112 .clr_uproc_interrupt_reg = 0x00218
113 }
114 },
115 { /* Snipe and Scamp */
116 .mailbox = 0x0052C,
117 .cache_line_size = 0x20,
118 {
119 .set_interrupt_mask_reg = 0x00288,
120 .clr_interrupt_mask_reg = 0x0028C,
121 .sense_interrupt_mask_reg = 0x00288,
122 .clr_interrupt_reg = 0x00284,
123 .sense_interrupt_reg = 0x00280,
124 .ioarrin_reg = 0x00504,
125 .sense_uproc_interrupt_reg = 0x00290,
126 .set_uproc_interrupt_reg = 0x00290,
127 .clr_uproc_interrupt_reg = 0x00294
128 }
129 },
130};
131
132static const struct ipr_chip_t ipr_chip[] = {
133 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
134 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
86f51436
BK
135 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
60e7486b 137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
1da177e4
LT
138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140};
141
142static int ipr_max_bus_speeds [] = {
143 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
144};
145
146MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
147MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
148module_param_named(max_speed, ipr_max_speed, uint, 0);
149MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
150module_param_named(log_level, ipr_log_level, uint, 0);
151MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
152module_param_named(testmode, ipr_testmode, int, 0);
153MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
154module_param_named(fastfail, ipr_fastfail, int, 0);
155MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
156module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
157MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
62275040
BK
158module_param_named(enable_cache, ipr_enable_cache, int, 0);
159MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
d3c74871
BK
160module_param_named(debug, ipr_debug, int, 0);
161MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
32d29776
BK
162module_param_named(auto_create, ipr_auto_create, int, 0);
163MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
1da177e4
LT
164MODULE_LICENSE("GPL");
165MODULE_VERSION(IPR_DRIVER_VERSION);
166
1da177e4
LT
167/* A constant array of IOASCs/URCs/Error Messages */
168static const
169struct ipr_error_table_t ipr_error_table[] = {
170 {0x00000000, 1, 1,
171 "8155: An unknown error was received"},
172 {0x00330000, 0, 0,
173 "Soft underlength error"},
174 {0x005A0000, 0, 0,
175 "Command to be cancelled not found"},
176 {0x00808000, 0, 0,
177 "Qualified success"},
178 {0x01080000, 1, 1,
179 "FFFE: Soft device bus error recovered by the IOA"},
896bbd21
BK
180 {0x01088100, 0, 1,
181 "4101: Soft device bus fabric error"},
1da177e4
LT
182 {0x01170600, 0, 1,
183 "FFF9: Device sector reassign successful"},
184 {0x01170900, 0, 1,
185 "FFF7: Media error recovered by device rewrite procedures"},
186 {0x01180200, 0, 1,
187 "7001: IOA sector reassignment successful"},
188 {0x01180500, 0, 1,
189 "FFF9: Soft media error. Sector reassignment recommended"},
190 {0x01180600, 0, 1,
191 "FFF7: Media error recovered by IOA rewrite procedures"},
192 {0x01418000, 0, 1,
193 "FF3D: Soft PCI bus error recovered by the IOA"},
194 {0x01440000, 1, 1,
195 "FFF6: Device hardware error recovered by the IOA"},
196 {0x01448100, 0, 1,
197 "FFF6: Device hardware error recovered by the device"},
198 {0x01448200, 1, 1,
199 "FF3D: Soft IOA error recovered by the IOA"},
200 {0x01448300, 0, 1,
201 "FFFA: Undefined device response recovered by the IOA"},
202 {0x014A0000, 1, 1,
203 "FFF6: Device bus error, message or command phase"},
35a39691
BK
204 {0x014A8000, 0, 1,
205 "FFFE: Task Management Function failed"},
1da177e4
LT
206 {0x015D0000, 0, 1,
207 "FFF6: Failure prediction threshold exceeded"},
208 {0x015D9200, 0, 1,
209 "8009: Impending cache battery pack failure"},
210 {0x02040400, 0, 0,
211 "34FF: Disk device format in progress"},
212 {0x023F0000, 0, 0,
213 "Synchronization required"},
214 {0x024E0000, 0, 0,
215 "No ready, IOA shutdown"},
216 {0x025A0000, 0, 0,
217 "Not ready, IOA has been shutdown"},
218 {0x02670100, 0, 1,
219 "3020: Storage subsystem configuration error"},
220 {0x03110B00, 0, 0,
221 "FFF5: Medium error, data unreadable, recommend reassign"},
222 {0x03110C00, 0, 0,
223 "7000: Medium error, data unreadable, do not reassign"},
224 {0x03310000, 0, 1,
225 "FFF3: Disk media format bad"},
226 {0x04050000, 0, 1,
227 "3002: Addressed device failed to respond to selection"},
228 {0x04080000, 1, 1,
229 "3100: Device bus error"},
230 {0x04080100, 0, 1,
231 "3109: IOA timed out a device command"},
232 {0x04088000, 0, 0,
233 "3120: SCSI bus is not operational"},
896bbd21
BK
234 {0x04088100, 0, 1,
235 "4100: Hard device bus fabric error"},
1da177e4
LT
236 {0x04118000, 0, 1,
237 "9000: IOA reserved area data check"},
238 {0x04118100, 0, 1,
239 "9001: IOA reserved area invalid data pattern"},
240 {0x04118200, 0, 1,
241 "9002: IOA reserved area LRC error"},
242 {0x04320000, 0, 1,
243 "102E: Out of alternate sectors for disk storage"},
244 {0x04330000, 1, 1,
245 "FFF4: Data transfer underlength error"},
246 {0x04338000, 1, 1,
247 "FFF4: Data transfer overlength error"},
248 {0x043E0100, 0, 1,
249 "3400: Logical unit failure"},
250 {0x04408500, 0, 1,
251 "FFF4: Device microcode is corrupt"},
252 {0x04418000, 1, 1,
253 "8150: PCI bus error"},
254 {0x04430000, 1, 0,
255 "Unsupported device bus message received"},
256 {0x04440000, 1, 1,
257 "FFF4: Disk device problem"},
258 {0x04448200, 1, 1,
259 "8150: Permanent IOA failure"},
260 {0x04448300, 0, 1,
261 "3010: Disk device returned wrong response to IOA"},
262 {0x04448400, 0, 1,
263 "8151: IOA microcode error"},
264 {0x04448500, 0, 0,
265 "Device bus status error"},
266 {0x04448600, 0, 1,
267 "8157: IOA error requiring IOA reset to recover"},
35a39691
BK
268 {0x04448700, 0, 0,
269 "ATA device status error"},
1da177e4
LT
270 {0x04490000, 0, 0,
271 "Message reject received from the device"},
272 {0x04449200, 0, 1,
273 "8008: A permanent cache battery pack failure occurred"},
274 {0x0444A000, 0, 1,
275 "9090: Disk unit has been modified after the last known status"},
276 {0x0444A200, 0, 1,
277 "9081: IOA detected device error"},
278 {0x0444A300, 0, 1,
279 "9082: IOA detected device error"},
280 {0x044A0000, 1, 1,
281 "3110: Device bus error, message or command phase"},
35a39691
BK
282 {0x044A8000, 1, 1,
283 "3110: SAS Command / Task Management Function failed"},
1da177e4
LT
284 {0x04670400, 0, 1,
285 "9091: Incorrect hardware configuration change has been detected"},
b0df54bb
BK
286 {0x04678000, 0, 1,
287 "9073: Invalid multi-adapter configuration"},
896bbd21
BK
288 {0x04678100, 0, 1,
289 "4010: Incorrect connection between cascaded expanders"},
290 {0x04678200, 0, 1,
291 "4020: Connections exceed IOA design limits"},
292 {0x04678300, 0, 1,
293 "4030: Incorrect multipath connection"},
294 {0x04679000, 0, 1,
295 "4110: Unsupported enclosure function"},
1da177e4
LT
296 {0x046E0000, 0, 1,
297 "FFF4: Command to logical unit failed"},
298 {0x05240000, 1, 0,
299 "Illegal request, invalid request type or request packet"},
300 {0x05250000, 0, 0,
301 "Illegal request, invalid resource handle"},
b0df54bb
BK
302 {0x05258000, 0, 0,
303 "Illegal request, commands not allowed to this device"},
304 {0x05258100, 0, 0,
305 "Illegal request, command not allowed to a secondary adapter"},
1da177e4
LT
306 {0x05260000, 0, 0,
307 "Illegal request, invalid field in parameter list"},
308 {0x05260100, 0, 0,
309 "Illegal request, parameter not supported"},
310 {0x05260200, 0, 0,
311 "Illegal request, parameter value invalid"},
312 {0x052C0000, 0, 0,
313 "Illegal request, command sequence error"},
b0df54bb
BK
314 {0x052C8000, 1, 0,
315 "Illegal request, dual adapter support not enabled"},
1da177e4
LT
316 {0x06040500, 0, 1,
317 "9031: Array protection temporarily suspended, protection resuming"},
318 {0x06040600, 0, 1,
319 "9040: Array protection temporarily suspended, protection resuming"},
896bbd21
BK
320 {0x06288000, 0, 1,
321 "3140: Device bus not ready to ready transition"},
1da177e4
LT
322 {0x06290000, 0, 1,
323 "FFFB: SCSI bus was reset"},
324 {0x06290500, 0, 0,
325 "FFFE: SCSI bus transition to single ended"},
326 {0x06290600, 0, 0,
327 "FFFE: SCSI bus transition to LVD"},
328 {0x06298000, 0, 1,
329 "FFFB: SCSI bus was reset by another initiator"},
330 {0x063F0300, 0, 1,
331 "3029: A device replacement has occurred"},
332 {0x064C8000, 0, 1,
333 "9051: IOA cache data exists for a missing or failed device"},
b0df54bb
BK
334 {0x064C8100, 0, 1,
335 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
1da177e4
LT
336 {0x06670100, 0, 1,
337 "9025: Disk unit is not supported at its physical location"},
338 {0x06670600, 0, 1,
339 "3020: IOA detected a SCSI bus configuration error"},
340 {0x06678000, 0, 1,
341 "3150: SCSI bus configuration error"},
b0df54bb
BK
342 {0x06678100, 0, 1,
343 "9074: Asymmetric advanced function disk configuration"},
896bbd21
BK
344 {0x06678300, 0, 1,
345 "4040: Incomplete multipath connection between IOA and enclosure"},
346 {0x06678400, 0, 1,
347 "4041: Incomplete multipath connection between enclosure and device"},
348 {0x06678500, 0, 1,
349 "9075: Incomplete multipath connection between IOA and remote IOA"},
350 {0x06678600, 0, 1,
351 "9076: Configuration error, missing remote IOA"},
352 {0x06679100, 0, 1,
353 "4050: Enclosure does not support a required multipath function"},
1da177e4
LT
354 {0x06690200, 0, 1,
355 "9041: Array protection temporarily suspended"},
356 {0x06698200, 0, 1,
357 "9042: Corrupt array parity detected on specified device"},
358 {0x066B0200, 0, 1,
359 "9030: Array no longer protected due to missing or failed disk unit"},
b0df54bb
BK
360 {0x066B8000, 0, 1,
361 "9071: Link operational transition"},
362 {0x066B8100, 0, 1,
363 "9072: Link not operational transition"},
1da177e4
LT
364 {0x066B8200, 0, 1,
365 "9032: Array exposed but still protected"},
896bbd21
BK
366 {0x066B9100, 0, 1,
367 "4061: Multipath redundancy level got better"},
368 {0x066B9200, 0, 1,
369 "4060: Multipath redundancy level got worse"},
1da177e4
LT
370 {0x07270000, 0, 0,
371 "Failure due to other device"},
372 {0x07278000, 0, 1,
373 "9008: IOA does not support functions expected by devices"},
374 {0x07278100, 0, 1,
375 "9010: Cache data associated with attached devices cannot be found"},
376 {0x07278200, 0, 1,
377 "9011: Cache data belongs to devices other than those attached"},
378 {0x07278400, 0, 1,
379 "9020: Array missing 2 or more devices with only 1 device present"},
380 {0x07278500, 0, 1,
381 "9021: Array missing 2 or more devices with 2 or more devices present"},
382 {0x07278600, 0, 1,
383 "9022: Exposed array is missing a required device"},
384 {0x07278700, 0, 1,
385 "9023: Array member(s) not at required physical locations"},
386 {0x07278800, 0, 1,
387 "9024: Array not functional due to present hardware configuration"},
388 {0x07278900, 0, 1,
389 "9026: Array not functional due to present hardware configuration"},
390 {0x07278A00, 0, 1,
391 "9027: Array is missing a device and parity is out of sync"},
392 {0x07278B00, 0, 1,
393 "9028: Maximum number of arrays already exist"},
394 {0x07278C00, 0, 1,
395 "9050: Required cache data cannot be located for a disk unit"},
396 {0x07278D00, 0, 1,
397 "9052: Cache data exists for a device that has been modified"},
398 {0x07278F00, 0, 1,
399 "9054: IOA resources not available due to previous problems"},
400 {0x07279100, 0, 1,
401 "9092: Disk unit requires initialization before use"},
402 {0x07279200, 0, 1,
403 "9029: Incorrect hardware configuration change has been detected"},
404 {0x07279600, 0, 1,
405 "9060: One or more disk pairs are missing from an array"},
406 {0x07279700, 0, 1,
407 "9061: One or more disks are missing from an array"},
408 {0x07279800, 0, 1,
409 "9062: One or more disks are missing from an array"},
410 {0x07279900, 0, 1,
411 "9063: Maximum number of functional arrays has been exceeded"},
412 {0x0B260000, 0, 0,
413 "Aborted command, invalid descriptor"},
414 {0x0B5A0000, 0, 0,
415 "Command terminated by host"}
416};
417
418static const struct ipr_ses_table_entry ipr_ses_table[] = {
419 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
420 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
421 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
422 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
423 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
424 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
425 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
426 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
427 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
428 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
429 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
430 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
431 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
432};
433
434/*
435 * Function Prototypes
436 */
437static int ipr_reset_alert(struct ipr_cmnd *);
438static void ipr_process_ccn(struct ipr_cmnd *);
439static void ipr_process_error(struct ipr_cmnd *);
440static void ipr_reset_ioa_job(struct ipr_cmnd *);
441static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
442 enum ipr_shutdown_type);
443
444#ifdef CONFIG_SCSI_IPR_TRACE
445/**
446 * ipr_trc_hook - Add a trace entry to the driver trace
447 * @ipr_cmd: ipr command struct
448 * @type: trace type
449 * @add_data: additional data
450 *
451 * Return value:
452 * none
453 **/
454static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
455 u8 type, u32 add_data)
456{
457 struct ipr_trace_entry *trace_entry;
458 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
459
460 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
461 trace_entry->time = jiffies;
462 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
463 trace_entry->type = type;
35a39691
BK
464 trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
465 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
1da177e4
LT
466 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
467 trace_entry->u.add_data = add_data;
468}
469#else
470#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
471#endif
472
473/**
474 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
475 * @ipr_cmd: ipr command struct
476 *
477 * Return value:
478 * none
479 **/
480static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
481{
482 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
483 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
484
485 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
486 ioarcb->write_data_transfer_length = 0;
487 ioarcb->read_data_transfer_length = 0;
488 ioarcb->write_ioadl_len = 0;
489 ioarcb->read_ioadl_len = 0;
490 ioasa->ioasc = 0;
491 ioasa->residual_data_len = 0;
35a39691 492 ioasa->u.gata.status = 0;
1da177e4
LT
493
494 ipr_cmd->scsi_cmd = NULL;
35a39691 495 ipr_cmd->qc = NULL;
1da177e4
LT
496 ipr_cmd->sense_buffer[0] = 0;
497 ipr_cmd->dma_use_sg = 0;
498}
499
500/**
501 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
502 * @ipr_cmd: ipr command struct
503 *
504 * Return value:
505 * none
506 **/
507static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
508{
509 ipr_reinit_ipr_cmnd(ipr_cmd);
510 ipr_cmd->u.scratch = 0;
511 ipr_cmd->sibling = NULL;
512 init_timer(&ipr_cmd->timer);
513}
514
515/**
516 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
517 * @ioa_cfg: ioa config struct
518 *
519 * Return value:
520 * pointer to ipr command struct
521 **/
522static
523struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
524{
525 struct ipr_cmnd *ipr_cmd;
526
527 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
528 list_del(&ipr_cmd->queue);
529 ipr_init_ipr_cmnd(ipr_cmd);
530
531 return ipr_cmd;
532}
533
534/**
535 * ipr_unmap_sglist - Unmap scatterlist if mapped
536 * @ioa_cfg: ioa config struct
537 * @ipr_cmd: ipr command struct
538 *
539 * Return value:
540 * nothing
541 **/
542static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
543 struct ipr_cmnd *ipr_cmd)
544{
545 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
546
547 if (ipr_cmd->dma_use_sg) {
548 if (scsi_cmd->use_sg > 0) {
549 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
550 scsi_cmd->use_sg,
551 scsi_cmd->sc_data_direction);
552 } else {
553 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
554 scsi_cmd->request_bufflen,
555 scsi_cmd->sc_data_direction);
556 }
557 }
558}
559
560/**
561 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
562 * @ioa_cfg: ioa config struct
563 * @clr_ints: interrupts to clear
564 *
565 * This function masks all interrupts on the adapter, then clears the
566 * interrupts specified in the mask
567 *
568 * Return value:
569 * none
570 **/
571static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
572 u32 clr_ints)
573{
574 volatile u32 int_reg;
575
576 /* Stop new interrupts */
577 ioa_cfg->allow_interrupts = 0;
578
579 /* Set interrupt mask to stop all new interrupts */
580 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
581
582 /* Clear any pending interrupts */
583 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
584 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
585}
586
587/**
588 * ipr_save_pcix_cmd_reg - Save PCI-X command register
589 * @ioa_cfg: ioa config struct
590 *
591 * Return value:
592 * 0 on success / -EIO on failure
593 **/
594static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
595{
596 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
597
598 if (pcix_cmd_reg == 0) {
599 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
600 return -EIO;
601 }
602
603 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
604 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
605 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
606 return -EIO;
607 }
608
609 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
610 return 0;
611}
612
613/**
614 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
615 * @ioa_cfg: ioa config struct
616 *
617 * Return value:
618 * 0 on success / -EIO on failure
619 **/
620static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
621{
622 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
623
624 if (pcix_cmd_reg) {
625 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
626 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
627 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
628 return -EIO;
629 }
630 } else {
631 dev_err(&ioa_cfg->pdev->dev,
632 "Failed to setup PCI-X command register\n");
633 return -EIO;
634 }
635
636 return 0;
637}
638
35a39691
BK
639/**
640 * ipr_sata_eh_done - done function for aborted SATA commands
641 * @ipr_cmd: ipr command struct
642 *
643 * This function is invoked for ops generated to SATA
644 * devices which are being aborted.
645 *
646 * Return value:
647 * none
648 **/
649static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
650{
651 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
652 struct ata_queued_cmd *qc = ipr_cmd->qc;
653 struct ipr_sata_port *sata_port = qc->ap->private_data;
654
655 qc->err_mask |= AC_ERR_OTHER;
656 sata_port->ioasa.status |= ATA_BUSY;
657 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
658 ata_qc_complete(qc);
659}
660
1da177e4
LT
661/**
662 * ipr_scsi_eh_done - mid-layer done function for aborted ops
663 * @ipr_cmd: ipr command struct
664 *
665 * This function is invoked by the interrupt handler for
666 * ops generated by the SCSI mid-layer which are being aborted.
667 *
668 * Return value:
669 * none
670 **/
671static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
672{
673 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
674 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
675
676 scsi_cmd->result |= (DID_ERROR << 16);
677
678 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
679 scsi_cmd->scsi_done(scsi_cmd);
680 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
681}
682
683/**
684 * ipr_fail_all_ops - Fails all outstanding ops.
685 * @ioa_cfg: ioa config struct
686 *
687 * This function fails all outstanding ops.
688 *
689 * Return value:
690 * none
691 **/
692static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
693{
694 struct ipr_cmnd *ipr_cmd, *temp;
695
696 ENTER;
697 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
698 list_del(&ipr_cmd->queue);
699
700 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
701 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
702
703 if (ipr_cmd->scsi_cmd)
704 ipr_cmd->done = ipr_scsi_eh_done;
35a39691
BK
705 else if (ipr_cmd->qc)
706 ipr_cmd->done = ipr_sata_eh_done;
1da177e4
LT
707
708 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
709 del_timer(&ipr_cmd->timer);
710 ipr_cmd->done(ipr_cmd);
711 }
712
713 LEAVE;
714}
715
716/**
717 * ipr_do_req - Send driver initiated requests.
718 * @ipr_cmd: ipr command struct
719 * @done: done function
720 * @timeout_func: timeout function
721 * @timeout: timeout value
722 *
723 * This function sends the specified command to the adapter with the
724 * timeout given. The done function is invoked on command completion.
725 *
726 * Return value:
727 * none
728 **/
729static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
730 void (*done) (struct ipr_cmnd *),
731 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
732{
733 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
734
735 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
736
737 ipr_cmd->done = done;
738
739 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
740 ipr_cmd->timer.expires = jiffies + timeout;
741 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
742
743 add_timer(&ipr_cmd->timer);
744
745 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
746
747 mb();
748 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
749 ioa_cfg->regs.ioarrin_reg);
750}
751
752/**
753 * ipr_internal_cmd_done - Op done function for an internally generated op.
754 * @ipr_cmd: ipr command struct
755 *
756 * This function is the op done function for an internally generated,
757 * blocking op. It simply wakes the sleeping thread.
758 *
759 * Return value:
760 * none
761 **/
762static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
763{
764 if (ipr_cmd->sibling)
765 ipr_cmd->sibling = NULL;
766 else
767 complete(&ipr_cmd->completion);
768}
769
770/**
771 * ipr_send_blocking_cmd - Send command and sleep on its completion.
772 * @ipr_cmd: ipr command struct
773 * @timeout_func: function to invoke if command times out
774 * @timeout: timeout
775 *
776 * Return value:
777 * none
778 **/
779static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
780 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
781 u32 timeout)
782{
783 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
784
785 init_completion(&ipr_cmd->completion);
786 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
787
788 spin_unlock_irq(ioa_cfg->host->host_lock);
789 wait_for_completion(&ipr_cmd->completion);
790 spin_lock_irq(ioa_cfg->host->host_lock);
791}
792
793/**
794 * ipr_send_hcam - Send an HCAM to the adapter.
795 * @ioa_cfg: ioa config struct
796 * @type: HCAM type
797 * @hostrcb: hostrcb struct
798 *
799 * This function will send a Host Controlled Async command to the adapter.
800 * If HCAMs are currently not allowed to be issued to the adapter, it will
801 * place the hostrcb on the free queue.
802 *
803 * Return value:
804 * none
805 **/
806static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
807 struct ipr_hostrcb *hostrcb)
808{
809 struct ipr_cmnd *ipr_cmd;
810 struct ipr_ioarcb *ioarcb;
811
812 if (ioa_cfg->allow_cmds) {
813 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
814 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
815 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
816
817 ipr_cmd->u.hostrcb = hostrcb;
818 ioarcb = &ipr_cmd->ioarcb;
819
820 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
821 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
822 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
823 ioarcb->cmd_pkt.cdb[1] = type;
824 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
825 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
826
827 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
828 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
829 ipr_cmd->ioadl[0].flags_and_data_len =
830 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
831 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
832
833 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
834 ipr_cmd->done = ipr_process_ccn;
835 else
836 ipr_cmd->done = ipr_process_error;
837
838 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
839
840 mb();
841 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
842 ioa_cfg->regs.ioarrin_reg);
843 } else {
844 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
845 }
846}
847
848/**
849 * ipr_init_res_entry - Initialize a resource entry struct.
850 * @res: resource entry struct
851 *
852 * Return value:
853 * none
854 **/
855static void ipr_init_res_entry(struct ipr_resource_entry *res)
856{
ee0a90fa 857 res->needs_sync_complete = 0;
1da177e4
LT
858 res->in_erp = 0;
859 res->add_to_ml = 0;
860 res->del_from_ml = 0;
861 res->resetting_device = 0;
862 res->sdev = NULL;
35a39691 863 res->sata_port = NULL;
1da177e4
LT
864}
865
866/**
867 * ipr_handle_config_change - Handle a config change from the adapter
868 * @ioa_cfg: ioa config struct
869 * @hostrcb: hostrcb
870 *
871 * Return value:
872 * none
873 **/
874static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
875 struct ipr_hostrcb *hostrcb)
876{
877 struct ipr_resource_entry *res = NULL;
878 struct ipr_config_table_entry *cfgte;
879 u32 is_ndn = 1;
880
881 cfgte = &hostrcb->hcam.u.ccn.cfgte;
882
883 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
884 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
885 sizeof(cfgte->res_addr))) {
886 is_ndn = 0;
887 break;
888 }
889 }
890
891 if (is_ndn) {
892 if (list_empty(&ioa_cfg->free_res_q)) {
893 ipr_send_hcam(ioa_cfg,
894 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
895 hostrcb);
896 return;
897 }
898
899 res = list_entry(ioa_cfg->free_res_q.next,
900 struct ipr_resource_entry, queue);
901
902 list_del(&res->queue);
903 ipr_init_res_entry(res);
904 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
905 }
906
907 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
908
909 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
910 if (res->sdev) {
1da177e4 911 res->del_from_ml = 1;
1121b794 912 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
1da177e4
LT
913 if (ioa_cfg->allow_ml_add_del)
914 schedule_work(&ioa_cfg->work_q);
915 } else
916 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
917 } else if (!res->sdev) {
918 res->add_to_ml = 1;
919 if (ioa_cfg->allow_ml_add_del)
920 schedule_work(&ioa_cfg->work_q);
921 }
922
923 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
924}
925
926/**
927 * ipr_process_ccn - Op done function for a CCN.
928 * @ipr_cmd: ipr command struct
929 *
930 * This function is the op done function for a configuration
931 * change notification host controlled async from the adapter.
932 *
933 * Return value:
934 * none
935 **/
936static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
937{
938 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
939 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
940 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
941
942 list_del(&hostrcb->queue);
943 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
944
945 if (ioasc) {
946 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
947 dev_err(&ioa_cfg->pdev->dev,
948 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
949
950 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
951 } else {
952 ipr_handle_config_change(ioa_cfg, hostrcb);
953 }
954}
955
956/**
957 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 958 * @vpd: vendor/product id/sn struct
1da177e4
LT
959 *
960 * Return value:
961 * none
962 **/
cfc32139 963static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
964{
965 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
966 + IPR_SERIAL_NUM_LEN];
967
cfc32139
BK
968 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
969 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
970 IPR_PROD_ID_LEN);
971 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
972 ipr_err("Vendor/Product ID: %s\n", buffer);
973
cfc32139 974 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
975 buffer[IPR_SERIAL_NUM_LEN] = '\0';
976 ipr_err(" Serial Number: %s\n", buffer);
977}
978
ee0f05b8
BK
979/**
980 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
981 * @vpd: vendor/product id/sn/wwn struct
982 *
983 * Return value:
984 * none
985 **/
986static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
987{
988 ipr_log_vpd(&vpd->vpd);
989 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
990 be32_to_cpu(vpd->wwid[1]));
991}
992
993/**
994 * ipr_log_enhanced_cache_error - Log a cache error.
995 * @ioa_cfg: ioa config struct
996 * @hostrcb: hostrcb struct
997 *
998 * Return value:
999 * none
1000 **/
1001static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1002 struct ipr_hostrcb *hostrcb)
1003{
1004 struct ipr_hostrcb_type_12_error *error =
1005 &hostrcb->hcam.u.error.u.type_12_error;
1006
1007 ipr_err("-----Current Configuration-----\n");
1008 ipr_err("Cache Directory Card Information:\n");
1009 ipr_log_ext_vpd(&error->ioa_vpd);
1010 ipr_err("Adapter Card Information:\n");
1011 ipr_log_ext_vpd(&error->cfc_vpd);
1012
1013 ipr_err("-----Expected Configuration-----\n");
1014 ipr_err("Cache Directory Card Information:\n");
1015 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1016 ipr_err("Adapter Card Information:\n");
1017 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1018
1019 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1020 be32_to_cpu(error->ioa_data[0]),
1021 be32_to_cpu(error->ioa_data[1]),
1022 be32_to_cpu(error->ioa_data[2]));
1023}
1024
1da177e4
LT
1025/**
1026 * ipr_log_cache_error - Log a cache error.
1027 * @ioa_cfg: ioa config struct
1028 * @hostrcb: hostrcb struct
1029 *
1030 * Return value:
1031 * none
1032 **/
1033static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1034 struct ipr_hostrcb *hostrcb)
1035{
1036 struct ipr_hostrcb_type_02_error *error =
1037 &hostrcb->hcam.u.error.u.type_02_error;
1038
1039 ipr_err("-----Current Configuration-----\n");
1040 ipr_err("Cache Directory Card Information:\n");
cfc32139 1041 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1042 ipr_err("Adapter Card Information:\n");
cfc32139 1043 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1044
1045 ipr_err("-----Expected Configuration-----\n");
1046 ipr_err("Cache Directory Card Information:\n");
cfc32139 1047 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1048 ipr_err("Adapter Card Information:\n");
cfc32139 1049 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1050
1051 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1052 be32_to_cpu(error->ioa_data[0]),
1053 be32_to_cpu(error->ioa_data[1]),
1054 be32_to_cpu(error->ioa_data[2]));
1055}
1056
ee0f05b8
BK
1057/**
1058 * ipr_log_enhanced_config_error - Log a configuration error.
1059 * @ioa_cfg: ioa config struct
1060 * @hostrcb: hostrcb struct
1061 *
1062 * Return value:
1063 * none
1064 **/
1065static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1066 struct ipr_hostrcb *hostrcb)
1067{
1068 int errors_logged, i;
1069 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1070 struct ipr_hostrcb_type_13_error *error;
1071
1072 error = &hostrcb->hcam.u.error.u.type_13_error;
1073 errors_logged = be32_to_cpu(error->errors_logged);
1074
1075 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1076 be32_to_cpu(error->errors_detected), errors_logged);
1077
1078 dev_entry = error->dev;
1079
1080 for (i = 0; i < errors_logged; i++, dev_entry++) {
1081 ipr_err_separator;
1082
1083 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1084 ipr_log_ext_vpd(&dev_entry->vpd);
1085
1086 ipr_err("-----New Device Information-----\n");
1087 ipr_log_ext_vpd(&dev_entry->new_vpd);
1088
1089 ipr_err("Cache Directory Card Information:\n");
1090 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1091
1092 ipr_err("Adapter Card Information:\n");
1093 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1094 }
1095}
1096
1da177e4
LT
1097/**
1098 * ipr_log_config_error - Log a configuration error.
1099 * @ioa_cfg: ioa config struct
1100 * @hostrcb: hostrcb struct
1101 *
1102 * Return value:
1103 * none
1104 **/
1105static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1106 struct ipr_hostrcb *hostrcb)
1107{
1108 int errors_logged, i;
1109 struct ipr_hostrcb_device_data_entry *dev_entry;
1110 struct ipr_hostrcb_type_03_error *error;
1111
1112 error = &hostrcb->hcam.u.error.u.type_03_error;
1113 errors_logged = be32_to_cpu(error->errors_logged);
1114
1115 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1116 be32_to_cpu(error->errors_detected), errors_logged);
1117
cfc32139 1118 dev_entry = error->dev;
1da177e4
LT
1119
1120 for (i = 0; i < errors_logged; i++, dev_entry++) {
1121 ipr_err_separator;
1122
fa15b1f6 1123 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1124 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1125
1126 ipr_err("-----New Device Information-----\n");
cfc32139 1127 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1128
1129 ipr_err("Cache Directory Card Information:\n");
cfc32139 1130 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1131
1132 ipr_err("Adapter Card Information:\n");
cfc32139 1133 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1134
1135 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1136 be32_to_cpu(dev_entry->ioa_data[0]),
1137 be32_to_cpu(dev_entry->ioa_data[1]),
1138 be32_to_cpu(dev_entry->ioa_data[2]),
1139 be32_to_cpu(dev_entry->ioa_data[3]),
1140 be32_to_cpu(dev_entry->ioa_data[4]));
1141 }
1142}
1143
ee0f05b8
BK
1144/**
1145 * ipr_log_enhanced_array_error - Log an array configuration error.
1146 * @ioa_cfg: ioa config struct
1147 * @hostrcb: hostrcb struct
1148 *
1149 * Return value:
1150 * none
1151 **/
1152static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1153 struct ipr_hostrcb *hostrcb)
1154{
1155 int i, num_entries;
1156 struct ipr_hostrcb_type_14_error *error;
1157 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1158 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1159
1160 error = &hostrcb->hcam.u.error.u.type_14_error;
1161
1162 ipr_err_separator;
1163
1164 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1165 error->protection_level,
1166 ioa_cfg->host->host_no,
1167 error->last_func_vset_res_addr.bus,
1168 error->last_func_vset_res_addr.target,
1169 error->last_func_vset_res_addr.lun);
1170
1171 ipr_err_separator;
1172
1173 array_entry = error->array_member;
1174 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1175 sizeof(error->array_member));
1176
1177 for (i = 0; i < num_entries; i++, array_entry++) {
1178 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1179 continue;
1180
1181 if (be32_to_cpu(error->exposed_mode_adn) == i)
1182 ipr_err("Exposed Array Member %d:\n", i);
1183 else
1184 ipr_err("Array Member %d:\n", i);
1185
1186 ipr_log_ext_vpd(&array_entry->vpd);
1187 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1188 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1189 "Expected Location");
1190
1191 ipr_err_separator;
1192 }
1193}
1194
1da177e4
LT
1195/**
1196 * ipr_log_array_error - Log an array configuration error.
1197 * @ioa_cfg: ioa config struct
1198 * @hostrcb: hostrcb struct
1199 *
1200 * Return value:
1201 * none
1202 **/
1203static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1204 struct ipr_hostrcb *hostrcb)
1205{
1206 int i;
1207 struct ipr_hostrcb_type_04_error *error;
1208 struct ipr_hostrcb_array_data_entry *array_entry;
1209 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1210
1211 error = &hostrcb->hcam.u.error.u.type_04_error;
1212
1213 ipr_err_separator;
1214
1215 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1216 error->protection_level,
1217 ioa_cfg->host->host_no,
1218 error->last_func_vset_res_addr.bus,
1219 error->last_func_vset_res_addr.target,
1220 error->last_func_vset_res_addr.lun);
1221
1222 ipr_err_separator;
1223
1224 array_entry = error->array_member;
1225
1226 for (i = 0; i < 18; i++) {
cfc32139 1227 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1228 continue;
1229
fa15b1f6 1230 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1231 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1232 else
1da177e4 1233 ipr_err("Array Member %d:\n", i);
1da177e4 1234
cfc32139 1235 ipr_log_vpd(&array_entry->vpd);
1da177e4 1236
fa15b1f6
BK
1237 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1238 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1239 "Expected Location");
1da177e4
LT
1240
1241 ipr_err_separator;
1242
1243 if (i == 9)
1244 array_entry = error->array_member2;
1245 else
1246 array_entry++;
1247 }
1248}
1249
1250/**
b0df54bb
BK
1251 * ipr_log_hex_data - Log additional hex IOA error data.
1252 * @data: IOA error data
1253 * @len: data length
1da177e4
LT
1254 *
1255 * Return value:
1256 * none
1257 **/
b0df54bb 1258static void ipr_log_hex_data(u32 *data, int len)
1da177e4
LT
1259{
1260 int i;
1da177e4 1261
b0df54bb 1262 if (len == 0)
1da177e4
LT
1263 return;
1264
b0df54bb 1265 for (i = 0; i < len / 4; i += 4) {
1da177e4 1266 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb
BK
1267 be32_to_cpu(data[i]),
1268 be32_to_cpu(data[i+1]),
1269 be32_to_cpu(data[i+2]),
1270 be32_to_cpu(data[i+3]));
1da177e4
LT
1271 }
1272}
1273
ee0f05b8
BK
1274/**
1275 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1276 * @ioa_cfg: ioa config struct
1277 * @hostrcb: hostrcb struct
1278 *
1279 * Return value:
1280 * none
1281 **/
1282static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1283 struct ipr_hostrcb *hostrcb)
1284{
1285 struct ipr_hostrcb_type_17_error *error;
1286
1287 error = &hostrcb->hcam.u.error.u.type_17_error;
1288 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1289
1290 ipr_err("%s\n", error->failure_reason);
1291 ipr_err("Remote Adapter VPD:\n");
1292 ipr_log_ext_vpd(&error->vpd);
1293 ipr_log_hex_data(error->data,
1294 be32_to_cpu(hostrcb->hcam.length) -
1295 (offsetof(struct ipr_hostrcb_error, u) +
1296 offsetof(struct ipr_hostrcb_type_17_error, data)));
1297}
1298
b0df54bb
BK
1299/**
1300 * ipr_log_dual_ioa_error - Log a dual adapter error.
1301 * @ioa_cfg: ioa config struct
1302 * @hostrcb: hostrcb struct
1303 *
1304 * Return value:
1305 * none
1306 **/
1307static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1308 struct ipr_hostrcb *hostrcb)
1309{
1310 struct ipr_hostrcb_type_07_error *error;
1311
1312 error = &hostrcb->hcam.u.error.u.type_07_error;
1313 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1314
1315 ipr_err("%s\n", error->failure_reason);
1316 ipr_err("Remote Adapter VPD:\n");
1317 ipr_log_vpd(&error->vpd);
1318 ipr_log_hex_data(error->data,
1319 be32_to_cpu(hostrcb->hcam.length) -
1320 (offsetof(struct ipr_hostrcb_error, u) +
1321 offsetof(struct ipr_hostrcb_type_07_error, data)));
1322}
1323
1324/**
1325 * ipr_log_generic_error - Log an adapter error.
1326 * @ioa_cfg: ioa config struct
1327 * @hostrcb: hostrcb struct
1328 *
1329 * Return value:
1330 * none
1331 **/
1332static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1333 struct ipr_hostrcb *hostrcb)
1334{
1335 ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1336 be32_to_cpu(hostrcb->hcam.length));
1337}
1338
1da177e4
LT
1339/**
1340 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1341 * @ioasc: IOASC
1342 *
1343 * This function will return the index of into the ipr_error_table
1344 * for the specified IOASC. If the IOASC is not in the table,
1345 * 0 will be returned, which points to the entry used for unknown errors.
1346 *
1347 * Return value:
1348 * index into the ipr_error_table
1349 **/
1350static u32 ipr_get_error(u32 ioasc)
1351{
1352 int i;
1353
1354 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 1355 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
1356 return i;
1357
1358 return 0;
1359}
1360
1361/**
1362 * ipr_handle_log_data - Log an adapter error.
1363 * @ioa_cfg: ioa config struct
1364 * @hostrcb: hostrcb struct
1365 *
1366 * This function logs an adapter error to the system.
1367 *
1368 * Return value:
1369 * none
1370 **/
1371static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1372 struct ipr_hostrcb *hostrcb)
1373{
1374 u32 ioasc;
1375 int error_index;
1376
1377 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1378 return;
1379
1380 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1381 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1382
1383 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1384
1385 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1386 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1387 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1388 scsi_report_bus_reset(ioa_cfg->host,
1389 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1390 }
1391
1392 error_index = ipr_get_error(ioasc);
1393
1394 if (!ipr_error_table[error_index].log_hcam)
1395 return;
1396
1397 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
fb3ed3cb
BK
1398 ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1399 "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
1400 } else {
1401 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1402 ipr_error_table[error_index].error);
1403 }
1404
1405 /* Set indication we have logged an error */
1406 ioa_cfg->errors_logged++;
1407
1408 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1409 return;
cf852037
BK
1410 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1411 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
1412
1413 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
1414 case IPR_HOST_RCB_OVERLAY_ID_2:
1415 ipr_log_cache_error(ioa_cfg, hostrcb);
1416 break;
1417 case IPR_HOST_RCB_OVERLAY_ID_3:
1418 ipr_log_config_error(ioa_cfg, hostrcb);
1419 break;
1420 case IPR_HOST_RCB_OVERLAY_ID_4:
1421 case IPR_HOST_RCB_OVERLAY_ID_6:
1422 ipr_log_array_error(ioa_cfg, hostrcb);
1423 break;
b0df54bb
BK
1424 case IPR_HOST_RCB_OVERLAY_ID_7:
1425 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1426 break;
ee0f05b8
BK
1427 case IPR_HOST_RCB_OVERLAY_ID_12:
1428 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1429 break;
1430 case IPR_HOST_RCB_OVERLAY_ID_13:
1431 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1432 break;
1433 case IPR_HOST_RCB_OVERLAY_ID_14:
1434 case IPR_HOST_RCB_OVERLAY_ID_16:
1435 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1436 break;
1437 case IPR_HOST_RCB_OVERLAY_ID_17:
1438 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1439 break;
cf852037 1440 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 1441 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 1442 default:
a9cfca96 1443 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
1444 break;
1445 }
1446}
1447
1448/**
1449 * ipr_process_error - Op done function for an adapter error log.
1450 * @ipr_cmd: ipr command struct
1451 *
1452 * This function is the op done function for an error log host
1453 * controlled async from the adapter. It will log the error and
1454 * send the HCAM back to the adapter.
1455 *
1456 * Return value:
1457 * none
1458 **/
1459static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1460{
1461 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1462 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1463 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1464
1465 list_del(&hostrcb->queue);
1466 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1467
1468 if (!ioasc) {
1469 ipr_handle_log_data(ioa_cfg, hostrcb);
1470 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1471 dev_err(&ioa_cfg->pdev->dev,
1472 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1473 }
1474
1475 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1476}
1477
1478/**
1479 * ipr_timeout - An internally generated op has timed out.
1480 * @ipr_cmd: ipr command struct
1481 *
1482 * This function blocks host requests and initiates an
1483 * adapter reset.
1484 *
1485 * Return value:
1486 * none
1487 **/
1488static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1489{
1490 unsigned long lock_flags = 0;
1491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1492
1493 ENTER;
1494 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1495
1496 ioa_cfg->errors_logged++;
1497 dev_err(&ioa_cfg->pdev->dev,
1498 "Adapter being reset due to command timeout.\n");
1499
1500 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1501 ioa_cfg->sdt_state = GET_DUMP;
1502
1503 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1504 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1505
1506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1507 LEAVE;
1508}
1509
1510/**
1511 * ipr_oper_timeout - Adapter timed out transitioning to operational
1512 * @ipr_cmd: ipr command struct
1513 *
1514 * This function blocks host requests and initiates an
1515 * adapter reset.
1516 *
1517 * Return value:
1518 * none
1519 **/
1520static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1521{
1522 unsigned long lock_flags = 0;
1523 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1524
1525 ENTER;
1526 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1527
1528 ioa_cfg->errors_logged++;
1529 dev_err(&ioa_cfg->pdev->dev,
1530 "Adapter timed out transitioning to operational.\n");
1531
1532 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1533 ioa_cfg->sdt_state = GET_DUMP;
1534
1535 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1536 if (ipr_fastfail)
1537 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1538 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1539 }
1540
1541 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1542 LEAVE;
1543}
1544
1545/**
1546 * ipr_reset_reload - Reset/Reload the IOA
1547 * @ioa_cfg: ioa config struct
1548 * @shutdown_type: shutdown type
1549 *
1550 * This function resets the adapter and re-initializes it.
1551 * This function assumes that all new host commands have been stopped.
1552 * Return value:
1553 * SUCCESS / FAILED
1554 **/
1555static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1556 enum ipr_shutdown_type shutdown_type)
1557{
1558 if (!ioa_cfg->in_reset_reload)
1559 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1560
1561 spin_unlock_irq(ioa_cfg->host->host_lock);
1562 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1563 spin_lock_irq(ioa_cfg->host->host_lock);
1564
1565 /* If we got hit with a host reset while we were already resetting
1566 the adapter for some reason, and the reset failed. */
1567 if (ioa_cfg->ioa_is_dead) {
1568 ipr_trace;
1569 return FAILED;
1570 }
1571
1572 return SUCCESS;
1573}
1574
1575/**
1576 * ipr_find_ses_entry - Find matching SES in SES table
1577 * @res: resource entry struct of SES
1578 *
1579 * Return value:
1580 * pointer to SES table entry / NULL on failure
1581 **/
1582static const struct ipr_ses_table_entry *
1583ipr_find_ses_entry(struct ipr_resource_entry *res)
1584{
1585 int i, j, matches;
1586 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1587
1588 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1589 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1590 if (ste->compare_product_id_byte[j] == 'X') {
1591 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1592 matches++;
1593 else
1594 break;
1595 } else
1596 matches++;
1597 }
1598
1599 if (matches == IPR_PROD_ID_LEN)
1600 return ste;
1601 }
1602
1603 return NULL;
1604}
1605
1606/**
1607 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1608 * @ioa_cfg: ioa config struct
1609 * @bus: SCSI bus
1610 * @bus_width: bus width
1611 *
1612 * Return value:
1613 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1614 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1615 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1616 * max 160MHz = max 320MB/sec).
1617 **/
1618static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1619{
1620 struct ipr_resource_entry *res;
1621 const struct ipr_ses_table_entry *ste;
1622 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1623
1624 /* Loop through each config table entry in the config table buffer */
1625 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1626 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1627 continue;
1628
1629 if (bus != res->cfgte.res_addr.bus)
1630 continue;
1631
1632 if (!(ste = ipr_find_ses_entry(res)))
1633 continue;
1634
1635 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1636 }
1637
1638 return max_xfer_rate;
1639}
1640
1641/**
1642 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1643 * @ioa_cfg: ioa config struct
1644 * @max_delay: max delay in micro-seconds to wait
1645 *
1646 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1647 *
1648 * Return value:
1649 * 0 on success / other on failure
1650 **/
1651static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1652{
1653 volatile u32 pcii_reg;
1654 int delay = 1;
1655
1656 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1657 while (delay < max_delay) {
1658 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1659
1660 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1661 return 0;
1662
1663 /* udelay cannot be used if delay is more than a few milliseconds */
1664 if ((delay / 1000) > MAX_UDELAY_MS)
1665 mdelay(delay / 1000);
1666 else
1667 udelay(delay);
1668
1669 delay += delay;
1670 }
1671 return -EIO;
1672}
1673
1674/**
1675 * ipr_get_ldump_data_section - Dump IOA memory
1676 * @ioa_cfg: ioa config struct
1677 * @start_addr: adapter address to dump
1678 * @dest: destination kernel buffer
1679 * @length_in_words: length to dump in 4 byte words
1680 *
1681 * Return value:
1682 * 0 on success / -EIO on failure
1683 **/
1684static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1685 u32 start_addr,
1686 __be32 *dest, u32 length_in_words)
1687{
1688 volatile u32 temp_pcii_reg;
1689 int i, delay = 0;
1690
1691 /* Write IOA interrupt reg starting LDUMP state */
1692 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1693 ioa_cfg->regs.set_uproc_interrupt_reg);
1694
1695 /* Wait for IO debug acknowledge */
1696 if (ipr_wait_iodbg_ack(ioa_cfg,
1697 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1698 dev_err(&ioa_cfg->pdev->dev,
1699 "IOA dump long data transfer timeout\n");
1700 return -EIO;
1701 }
1702
1703 /* Signal LDUMP interlocked - clear IO debug ack */
1704 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1705 ioa_cfg->regs.clr_interrupt_reg);
1706
1707 /* Write Mailbox with starting address */
1708 writel(start_addr, ioa_cfg->ioa_mailbox);
1709
1710 /* Signal address valid - clear IOA Reset alert */
1711 writel(IPR_UPROCI_RESET_ALERT,
1712 ioa_cfg->regs.clr_uproc_interrupt_reg);
1713
1714 for (i = 0; i < length_in_words; i++) {
1715 /* Wait for IO debug acknowledge */
1716 if (ipr_wait_iodbg_ack(ioa_cfg,
1717 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1718 dev_err(&ioa_cfg->pdev->dev,
1719 "IOA dump short data transfer timeout\n");
1720 return -EIO;
1721 }
1722
1723 /* Read data from mailbox and increment destination pointer */
1724 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1725 dest++;
1726
1727 /* For all but the last word of data, signal data received */
1728 if (i < (length_in_words - 1)) {
1729 /* Signal dump data received - Clear IO debug Ack */
1730 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1731 ioa_cfg->regs.clr_interrupt_reg);
1732 }
1733 }
1734
1735 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1736 writel(IPR_UPROCI_RESET_ALERT,
1737 ioa_cfg->regs.set_uproc_interrupt_reg);
1738
1739 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1740 ioa_cfg->regs.clr_uproc_interrupt_reg);
1741
1742 /* Signal dump data received - Clear IO debug Ack */
1743 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1744 ioa_cfg->regs.clr_interrupt_reg);
1745
1746 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1747 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1748 temp_pcii_reg =
1749 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1750
1751 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1752 return 0;
1753
1754 udelay(10);
1755 delay += 10;
1756 }
1757
1758 return 0;
1759}
1760
1761#ifdef CONFIG_SCSI_IPR_DUMP
1762/**
1763 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1764 * @ioa_cfg: ioa config struct
1765 * @pci_address: adapter address
1766 * @length: length of data to copy
1767 *
1768 * Copy data from PCI adapter to kernel buffer.
1769 * Note: length MUST be a 4 byte multiple
1770 * Return value:
1771 * 0 on success / other on failure
1772 **/
1773static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1774 unsigned long pci_address, u32 length)
1775{
1776 int bytes_copied = 0;
1777 int cur_len, rc, rem_len, rem_page_len;
1778 __be32 *page;
1779 unsigned long lock_flags = 0;
1780 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1781
1782 while (bytes_copied < length &&
1783 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1784 if (ioa_dump->page_offset >= PAGE_SIZE ||
1785 ioa_dump->page_offset == 0) {
1786 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1787
1788 if (!page) {
1789 ipr_trace;
1790 return bytes_copied;
1791 }
1792
1793 ioa_dump->page_offset = 0;
1794 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1795 ioa_dump->next_page_index++;
1796 } else
1797 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1798
1799 rem_len = length - bytes_copied;
1800 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1801 cur_len = min(rem_len, rem_page_len);
1802
1803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1804 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1805 rc = -EIO;
1806 } else {
1807 rc = ipr_get_ldump_data_section(ioa_cfg,
1808 pci_address + bytes_copied,
1809 &page[ioa_dump->page_offset / 4],
1810 (cur_len / sizeof(u32)));
1811 }
1812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1813
1814 if (!rc) {
1815 ioa_dump->page_offset += cur_len;
1816 bytes_copied += cur_len;
1817 } else {
1818 ipr_trace;
1819 break;
1820 }
1821 schedule();
1822 }
1823
1824 return bytes_copied;
1825}
1826
1827/**
1828 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1829 * @hdr: dump entry header struct
1830 *
1831 * Return value:
1832 * nothing
1833 **/
1834static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1835{
1836 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1837 hdr->num_elems = 1;
1838 hdr->offset = sizeof(*hdr);
1839 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1840}
1841
1842/**
1843 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1844 * @ioa_cfg: ioa config struct
1845 * @driver_dump: driver dump struct
1846 *
1847 * Return value:
1848 * nothing
1849 **/
1850static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1851 struct ipr_driver_dump *driver_dump)
1852{
1853 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1854
1855 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1856 driver_dump->ioa_type_entry.hdr.len =
1857 sizeof(struct ipr_dump_ioa_type_entry) -
1858 sizeof(struct ipr_dump_entry_header);
1859 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1860 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1861 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1862 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1863 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1864 ucode_vpd->minor_release[1];
1865 driver_dump->hdr.num_entries++;
1866}
1867
1868/**
1869 * ipr_dump_version_data - Fill in the driver version in the dump.
1870 * @ioa_cfg: ioa config struct
1871 * @driver_dump: driver dump struct
1872 *
1873 * Return value:
1874 * nothing
1875 **/
1876static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1877 struct ipr_driver_dump *driver_dump)
1878{
1879 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1880 driver_dump->version_entry.hdr.len =
1881 sizeof(struct ipr_dump_version_entry) -
1882 sizeof(struct ipr_dump_entry_header);
1883 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1884 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1885 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1886 driver_dump->hdr.num_entries++;
1887}
1888
1889/**
1890 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1891 * @ioa_cfg: ioa config struct
1892 * @driver_dump: driver dump struct
1893 *
1894 * Return value:
1895 * nothing
1896 **/
1897static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1898 struct ipr_driver_dump *driver_dump)
1899{
1900 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1901 driver_dump->trace_entry.hdr.len =
1902 sizeof(struct ipr_dump_trace_entry) -
1903 sizeof(struct ipr_dump_entry_header);
1904 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1905 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1906 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1907 driver_dump->hdr.num_entries++;
1908}
1909
1910/**
1911 * ipr_dump_location_data - Fill in the IOA location in the dump.
1912 * @ioa_cfg: ioa config struct
1913 * @driver_dump: driver dump struct
1914 *
1915 * Return value:
1916 * nothing
1917 **/
1918static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1919 struct ipr_driver_dump *driver_dump)
1920{
1921 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1922 driver_dump->location_entry.hdr.len =
1923 sizeof(struct ipr_dump_location_entry) -
1924 sizeof(struct ipr_dump_entry_header);
1925 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1926 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1927 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1928 driver_dump->hdr.num_entries++;
1929}
1930
1931/**
1932 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1933 * @ioa_cfg: ioa config struct
1934 * @dump: dump struct
1935 *
1936 * Return value:
1937 * nothing
1938 **/
1939static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1940{
1941 unsigned long start_addr, sdt_word;
1942 unsigned long lock_flags = 0;
1943 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1944 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1945 u32 num_entries, start_off, end_off;
1946 u32 bytes_to_copy, bytes_copied, rc;
1947 struct ipr_sdt *sdt;
1948 int i;
1949
1950 ENTER;
1951
1952 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1953
1954 if (ioa_cfg->sdt_state != GET_DUMP) {
1955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1956 return;
1957 }
1958
1959 start_addr = readl(ioa_cfg->ioa_mailbox);
1960
1961 if (!ipr_sdt_is_fmt2(start_addr)) {
1962 dev_err(&ioa_cfg->pdev->dev,
1963 "Invalid dump table format: %lx\n", start_addr);
1964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1965 return;
1966 }
1967
1968 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1969
1970 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1971
1972 /* Initialize the overall dump header */
1973 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1974 driver_dump->hdr.num_entries = 1;
1975 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1976 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1977 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1978 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1979
1980 ipr_dump_version_data(ioa_cfg, driver_dump);
1981 ipr_dump_location_data(ioa_cfg, driver_dump);
1982 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1983 ipr_dump_trace_data(ioa_cfg, driver_dump);
1984
1985 /* Update dump_header */
1986 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1987
1988 /* IOA Dump entry */
1989 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1990 ioa_dump->format = IPR_SDT_FMT2;
1991 ioa_dump->hdr.len = 0;
1992 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1993 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1994
1995 /* First entries in sdt are actually a list of dump addresses and
1996 lengths to gather the real dump data. sdt represents the pointer
1997 to the ioa generated dump table. Dump data will be extracted based
1998 on entries in this table */
1999 sdt = &ioa_dump->sdt;
2000
2001 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2002 sizeof(struct ipr_sdt) / sizeof(__be32));
2003
2004 /* Smart Dump table is ready to use and the first entry is valid */
2005 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2006 dev_err(&ioa_cfg->pdev->dev,
2007 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2008 rc, be32_to_cpu(sdt->hdr.state));
2009 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2010 ioa_cfg->sdt_state = DUMP_OBTAINED;
2011 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2012 return;
2013 }
2014
2015 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2016
2017 if (num_entries > IPR_NUM_SDT_ENTRIES)
2018 num_entries = IPR_NUM_SDT_ENTRIES;
2019
2020 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2021
2022 for (i = 0; i < num_entries; i++) {
2023 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2024 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2025 break;
2026 }
2027
2028 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2029 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2030 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2031 end_off = be32_to_cpu(sdt->entry[i].end_offset);
2032
2033 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2034 bytes_to_copy = end_off - start_off;
2035 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2036 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2037 continue;
2038 }
2039
2040 /* Copy data from adapter to driver buffers */
2041 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2042 bytes_to_copy);
2043
2044 ioa_dump->hdr.len += bytes_copied;
2045
2046 if (bytes_copied != bytes_to_copy) {
2047 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2048 break;
2049 }
2050 }
2051 }
2052 }
2053
2054 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2055
2056 /* Update dump_header */
2057 driver_dump->hdr.len += ioa_dump->hdr.len;
2058 wmb();
2059 ioa_cfg->sdt_state = DUMP_OBTAINED;
2060 LEAVE;
2061}
2062
2063#else
2064#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2065#endif
2066
2067/**
2068 * ipr_release_dump - Free adapter dump memory
2069 * @kref: kref struct
2070 *
2071 * Return value:
2072 * nothing
2073 **/
2074static void ipr_release_dump(struct kref *kref)
2075{
2076 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2077 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2078 unsigned long lock_flags = 0;
2079 int i;
2080
2081 ENTER;
2082 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2083 ioa_cfg->dump = NULL;
2084 ioa_cfg->sdt_state = INACTIVE;
2085 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2086
2087 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2088 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2089
2090 kfree(dump);
2091 LEAVE;
2092}
2093
2094/**
2095 * ipr_worker_thread - Worker thread
2096 * @data: ioa config struct
2097 *
2098 * Called at task level from a work thread. This function takes care
2099 * of adding and removing device from the mid-layer as configuration
2100 * changes are detected by the adapter.
2101 *
2102 * Return value:
2103 * nothing
2104 **/
2105static void ipr_worker_thread(void *data)
2106{
2107 unsigned long lock_flags;
2108 struct ipr_resource_entry *res;
2109 struct scsi_device *sdev;
2110 struct ipr_dump *dump;
2111 struct ipr_ioa_cfg *ioa_cfg = data;
2112 u8 bus, target, lun;
2113 int did_work;
2114
2115 ENTER;
2116 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2117
2118 if (ioa_cfg->sdt_state == GET_DUMP) {
2119 dump = ioa_cfg->dump;
2120 if (!dump) {
2121 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2122 return;
2123 }
2124 kref_get(&dump->kref);
2125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2126 ipr_get_ioa_dump(ioa_cfg, dump);
2127 kref_put(&dump->kref, ipr_release_dump);
2128
2129 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2130 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2131 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2133 return;
2134 }
2135
2136restart:
2137 do {
2138 did_work = 0;
2139 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2141 return;
2142 }
2143
2144 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2145 if (res->del_from_ml && res->sdev) {
2146 did_work = 1;
2147 sdev = res->sdev;
2148 if (!scsi_device_get(sdev)) {
1da177e4
LT
2149 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2150 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2151 scsi_remove_device(sdev);
2152 scsi_device_put(sdev);
2153 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2154 }
2155 break;
2156 }
2157 }
2158 } while(did_work);
2159
2160 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2161 if (res->add_to_ml) {
2162 bus = res->cfgte.res_addr.bus;
2163 target = res->cfgte.res_addr.target;
2164 lun = res->cfgte.res_addr.lun;
1121b794 2165 res->add_to_ml = 0;
1da177e4
LT
2166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2167 scsi_add_device(ioa_cfg->host, bus, target, lun);
2168 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2169 goto restart;
2170 }
2171 }
2172
2173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
312c004d 2174 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
1da177e4
LT
2175 LEAVE;
2176}
2177
2178#ifdef CONFIG_SCSI_IPR_TRACE
2179/**
2180 * ipr_read_trace - Dump the adapter trace
2181 * @kobj: kobject struct
2182 * @buf: buffer
2183 * @off: offset
2184 * @count: buffer size
2185 *
2186 * Return value:
2187 * number of bytes printed to buffer
2188 **/
2189static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2190 loff_t off, size_t count)
2191{
2192 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2193 struct Scsi_Host *shost = class_to_shost(cdev);
2194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2195 unsigned long lock_flags = 0;
2196 int size = IPR_TRACE_SIZE;
2197 char *src = (char *)ioa_cfg->trace;
2198
2199 if (off > size)
2200 return 0;
2201 if (off + count > size) {
2202 size -= off;
2203 count = size;
2204 }
2205
2206 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2207 memcpy(buf, &src[off], count);
2208 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2209 return count;
2210}
2211
2212static struct bin_attribute ipr_trace_attr = {
2213 .attr = {
2214 .name = "trace",
2215 .mode = S_IRUGO,
2216 },
2217 .size = 0,
2218 .read = ipr_read_trace,
2219};
2220#endif
2221
62275040
BK
2222static const struct {
2223 enum ipr_cache_state state;
2224 char *name;
2225} cache_state [] = {
2226 { CACHE_NONE, "none" },
2227 { CACHE_DISABLED, "disabled" },
2228 { CACHE_ENABLED, "enabled" }
2229};
2230
2231/**
2232 * ipr_show_write_caching - Show the write caching attribute
2233 * @class_dev: class device struct
2234 * @buf: buffer
2235 *
2236 * Return value:
2237 * number of bytes printed to buffer
2238 **/
2239static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2240{
2241 struct Scsi_Host *shost = class_to_shost(class_dev);
2242 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2243 unsigned long lock_flags = 0;
2244 int i, len = 0;
2245
2246 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2247 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2248 if (cache_state[i].state == ioa_cfg->cache_state) {
2249 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2250 break;
2251 }
2252 }
2253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2254 return len;
2255}
2256
2257
2258/**
2259 * ipr_store_write_caching - Enable/disable adapter write cache
2260 * @class_dev: class_device struct
2261 * @buf: buffer
2262 * @count: buffer size
2263 *
2264 * This function will enable/disable adapter write cache.
2265 *
2266 * Return value:
2267 * count on success / other on failure
2268 **/
2269static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2270 const char *buf, size_t count)
2271{
2272 struct Scsi_Host *shost = class_to_shost(class_dev);
2273 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2274 unsigned long lock_flags = 0;
2275 enum ipr_cache_state new_state = CACHE_INVALID;
2276 int i;
2277
2278 if (!capable(CAP_SYS_ADMIN))
2279 return -EACCES;
2280 if (ioa_cfg->cache_state == CACHE_NONE)
2281 return -EINVAL;
2282
2283 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2284 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2285 new_state = cache_state[i].state;
2286 break;
2287 }
2288 }
2289
2290 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2291 return -EINVAL;
2292
2293 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2294 if (ioa_cfg->cache_state == new_state) {
2295 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2296 return count;
2297 }
2298
2299 ioa_cfg->cache_state = new_state;
2300 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2301 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2302 if (!ioa_cfg->in_reset_reload)
2303 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2305 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2306
2307 return count;
2308}
2309
2310static struct class_device_attribute ipr_ioa_cache_attr = {
2311 .attr = {
2312 .name = "write_cache",
2313 .mode = S_IRUGO | S_IWUSR,
2314 },
2315 .show = ipr_show_write_caching,
2316 .store = ipr_store_write_caching
2317};
2318
1da177e4
LT
2319/**
2320 * ipr_show_fw_version - Show the firmware version
2321 * @class_dev: class device struct
2322 * @buf: buffer
2323 *
2324 * Return value:
2325 * number of bytes printed to buffer
2326 **/
2327static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2328{
2329 struct Scsi_Host *shost = class_to_shost(class_dev);
2330 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2331 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2332 unsigned long lock_flags = 0;
2333 int len;
2334
2335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2336 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2337 ucode_vpd->major_release, ucode_vpd->card_type,
2338 ucode_vpd->minor_release[0],
2339 ucode_vpd->minor_release[1]);
2340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2341 return len;
2342}
2343
2344static struct class_device_attribute ipr_fw_version_attr = {
2345 .attr = {
2346 .name = "fw_version",
2347 .mode = S_IRUGO,
2348 },
2349 .show = ipr_show_fw_version,
2350};
2351
2352/**
2353 * ipr_show_log_level - Show the adapter's error logging level
2354 * @class_dev: class device struct
2355 * @buf: buffer
2356 *
2357 * Return value:
2358 * number of bytes printed to buffer
2359 **/
2360static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2361{
2362 struct Scsi_Host *shost = class_to_shost(class_dev);
2363 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2364 unsigned long lock_flags = 0;
2365 int len;
2366
2367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2368 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2370 return len;
2371}
2372
2373/**
2374 * ipr_store_log_level - Change the adapter's error logging level
2375 * @class_dev: class device struct
2376 * @buf: buffer
2377 *
2378 * Return value:
2379 * number of bytes printed to buffer
2380 **/
2381static ssize_t ipr_store_log_level(struct class_device *class_dev,
2382 const char *buf, size_t count)
2383{
2384 struct Scsi_Host *shost = class_to_shost(class_dev);
2385 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2386 unsigned long lock_flags = 0;
2387
2388 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2389 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2390 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2391 return strlen(buf);
2392}
2393
2394static struct class_device_attribute ipr_log_level_attr = {
2395 .attr = {
2396 .name = "log_level",
2397 .mode = S_IRUGO | S_IWUSR,
2398 },
2399 .show = ipr_show_log_level,
2400 .store = ipr_store_log_level
2401};
2402
2403/**
2404 * ipr_store_diagnostics - IOA Diagnostics interface
2405 * @class_dev: class_device struct
2406 * @buf: buffer
2407 * @count: buffer size
2408 *
2409 * This function will reset the adapter and wait a reasonable
2410 * amount of time for any errors that the adapter might log.
2411 *
2412 * Return value:
2413 * count on success / other on failure
2414 **/
2415static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2416 const char *buf, size_t count)
2417{
2418 struct Scsi_Host *shost = class_to_shost(class_dev);
2419 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2420 unsigned long lock_flags = 0;
2421 int rc = count;
2422
2423 if (!capable(CAP_SYS_ADMIN))
2424 return -EACCES;
2425
2426 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2428 ioa_cfg->errors_logged = 0;
2429 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2430
2431 if (ioa_cfg->in_reset_reload) {
2432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2433 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2434
2435 /* Wait for a second for any errors to be logged */
2436 msleep(1000);
2437 } else {
2438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2439 return -EIO;
2440 }
2441
2442 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2443 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2444 rc = -EIO;
2445 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2446
2447 return rc;
2448}
2449
2450static struct class_device_attribute ipr_diagnostics_attr = {
2451 .attr = {
2452 .name = "run_diagnostics",
2453 .mode = S_IWUSR,
2454 },
2455 .store = ipr_store_diagnostics
2456};
2457
f37eb54b
BK
2458/**
2459 * ipr_show_adapter_state - Show the adapter's state
2460 * @class_dev: class device struct
2461 * @buf: buffer
2462 *
2463 * Return value:
2464 * number of bytes printed to buffer
2465 **/
2466static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2467{
2468 struct Scsi_Host *shost = class_to_shost(class_dev);
2469 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2470 unsigned long lock_flags = 0;
2471 int len;
2472
2473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2474 if (ioa_cfg->ioa_is_dead)
2475 len = snprintf(buf, PAGE_SIZE, "offline\n");
2476 else
2477 len = snprintf(buf, PAGE_SIZE, "online\n");
2478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2479 return len;
2480}
2481
2482/**
2483 * ipr_store_adapter_state - Change adapter state
2484 * @class_dev: class_device struct
2485 * @buf: buffer
2486 * @count: buffer size
2487 *
2488 * This function will change the adapter's state.
2489 *
2490 * Return value:
2491 * count on success / other on failure
2492 **/
2493static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2494 const char *buf, size_t count)
2495{
2496 struct Scsi_Host *shost = class_to_shost(class_dev);
2497 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2498 unsigned long lock_flags;
2499 int result = count;
2500
2501 if (!capable(CAP_SYS_ADMIN))
2502 return -EACCES;
2503
2504 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2505 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2506 ioa_cfg->ioa_is_dead = 0;
2507 ioa_cfg->reset_retries = 0;
2508 ioa_cfg->in_ioa_bringdown = 0;
2509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2510 }
2511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2512 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2513
2514 return result;
2515}
2516
2517static struct class_device_attribute ipr_ioa_state_attr = {
2518 .attr = {
2519 .name = "state",
2520 .mode = S_IRUGO | S_IWUSR,
2521 },
2522 .show = ipr_show_adapter_state,
2523 .store = ipr_store_adapter_state
2524};
2525
1da177e4
LT
2526/**
2527 * ipr_store_reset_adapter - Reset the adapter
2528 * @class_dev: class_device struct
2529 * @buf: buffer
2530 * @count: buffer size
2531 *
2532 * This function will reset the adapter.
2533 *
2534 * Return value:
2535 * count on success / other on failure
2536 **/
2537static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2538 const char *buf, size_t count)
2539{
2540 struct Scsi_Host *shost = class_to_shost(class_dev);
2541 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2542 unsigned long lock_flags;
2543 int result = count;
2544
2545 if (!capable(CAP_SYS_ADMIN))
2546 return -EACCES;
2547
2548 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2549 if (!ioa_cfg->in_reset_reload)
2550 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2552 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2553
2554 return result;
2555}
2556
2557static struct class_device_attribute ipr_ioa_reset_attr = {
2558 .attr = {
2559 .name = "reset_host",
2560 .mode = S_IWUSR,
2561 },
2562 .store = ipr_store_reset_adapter
2563};
2564
2565/**
2566 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2567 * @buf_len: buffer length
2568 *
2569 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2570 * list to use for microcode download
2571 *
2572 * Return value:
2573 * pointer to sglist / NULL on failure
2574 **/
2575static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2576{
2577 int sg_size, order, bsize_elem, num_elem, i, j;
2578 struct ipr_sglist *sglist;
2579 struct scatterlist *scatterlist;
2580 struct page *page;
2581
2582 /* Get the minimum size per scatter/gather element */
2583 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2584
2585 /* Get the actual size per element */
2586 order = get_order(sg_size);
2587
2588 /* Determine the actual number of bytes per element */
2589 bsize_elem = PAGE_SIZE * (1 << order);
2590
2591 /* Determine the actual number of sg entries needed */
2592 if (buf_len % bsize_elem)
2593 num_elem = (buf_len / bsize_elem) + 1;
2594 else
2595 num_elem = buf_len / bsize_elem;
2596
2597 /* Allocate a scatter/gather list for the DMA */
0bc42e35 2598 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
2599 (sizeof(struct scatterlist) * (num_elem - 1)),
2600 GFP_KERNEL);
2601
2602 if (sglist == NULL) {
2603 ipr_trace;
2604 return NULL;
2605 }
2606
1da177e4
LT
2607 scatterlist = sglist->scatterlist;
2608
2609 sglist->order = order;
2610 sglist->num_sg = num_elem;
2611
2612 /* Allocate a bunch of sg elements */
2613 for (i = 0; i < num_elem; i++) {
2614 page = alloc_pages(GFP_KERNEL, order);
2615 if (!page) {
2616 ipr_trace;
2617
2618 /* Free up what we already allocated */
2619 for (j = i - 1; j >= 0; j--)
2620 __free_pages(scatterlist[j].page, order);
2621 kfree(sglist);
2622 return NULL;
2623 }
2624
2625 scatterlist[i].page = page;
2626 }
2627
2628 return sglist;
2629}
2630
2631/**
2632 * ipr_free_ucode_buffer - Frees a microcode download buffer
2633 * @p_dnld: scatter/gather list pointer
2634 *
2635 * Free a DMA'able ucode download buffer previously allocated with
2636 * ipr_alloc_ucode_buffer
2637 *
2638 * Return value:
2639 * nothing
2640 **/
2641static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2642{
2643 int i;
2644
2645 for (i = 0; i < sglist->num_sg; i++)
2646 __free_pages(sglist->scatterlist[i].page, sglist->order);
2647
2648 kfree(sglist);
2649}
2650
2651/**
2652 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2653 * @sglist: scatter/gather list pointer
2654 * @buffer: buffer pointer
2655 * @len: buffer length
2656 *
2657 * Copy a microcode image from a user buffer into a buffer allocated by
2658 * ipr_alloc_ucode_buffer
2659 *
2660 * Return value:
2661 * 0 on success / other on failure
2662 **/
2663static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2664 u8 *buffer, u32 len)
2665{
2666 int bsize_elem, i, result = 0;
2667 struct scatterlist *scatterlist;
2668 void *kaddr;
2669
2670 /* Determine the actual number of bytes per element */
2671 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2672
2673 scatterlist = sglist->scatterlist;
2674
2675 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2676 kaddr = kmap(scatterlist[i].page);
2677 memcpy(kaddr, buffer, bsize_elem);
2678 kunmap(scatterlist[i].page);
2679
2680 scatterlist[i].length = bsize_elem;
2681
2682 if (result != 0) {
2683 ipr_trace;
2684 return result;
2685 }
2686 }
2687
2688 if (len % bsize_elem) {
2689 kaddr = kmap(scatterlist[i].page);
2690 memcpy(kaddr, buffer, len % bsize_elem);
2691 kunmap(scatterlist[i].page);
2692
2693 scatterlist[i].length = len % bsize_elem;
2694 }
2695
2696 sglist->buffer_len = len;
2697 return result;
2698}
2699
2700/**
12baa420 2701 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
2702 * @ipr_cmd: ipr command struct
2703 * @sglist: scatter/gather list
1da177e4 2704 *
12baa420 2705 * Builds a microcode download IOA data list (IOADL).
1da177e4 2706 *
1da177e4 2707 **/
12baa420
BK
2708static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2709 struct ipr_sglist *sglist)
1da177e4 2710{
1da177e4
LT
2711 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2712 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2713 struct scatterlist *scatterlist = sglist->scatterlist;
2714 int i;
2715
12baa420 2716 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 2717 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
12baa420 2718 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
1da177e4
LT
2719 ioarcb->write_ioadl_len =
2720 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2721
2722 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2723 ioadl[i].flags_and_data_len =
2724 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2725 ioadl[i].address =
2726 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2727 }
2728
12baa420
BK
2729 ioadl[i-1].flags_and_data_len |=
2730 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2731}
2732
2733/**
2734 * ipr_update_ioa_ucode - Update IOA's microcode
2735 * @ioa_cfg: ioa config struct
2736 * @sglist: scatter/gather list
2737 *
2738 * Initiate an adapter reset to update the IOA's microcode
2739 *
2740 * Return value:
2741 * 0 on success / -EIO on failure
2742 **/
2743static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2744 struct ipr_sglist *sglist)
2745{
2746 unsigned long lock_flags;
2747
2748 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2749
2750 if (ioa_cfg->ucode_sglist) {
2751 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2752 dev_err(&ioa_cfg->pdev->dev,
2753 "Microcode download already in progress\n");
2754 return -EIO;
1da177e4 2755 }
12baa420
BK
2756
2757 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2758 sglist->num_sg, DMA_TO_DEVICE);
2759
2760 if (!sglist->num_dma_sg) {
2761 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2762 dev_err(&ioa_cfg->pdev->dev,
2763 "Failed to map microcode download buffer!\n");
1da177e4
LT
2764 return -EIO;
2765 }
2766
12baa420
BK
2767 ioa_cfg->ucode_sglist = sglist;
2768 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2769 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2770 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2771
2772 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2773 ioa_cfg->ucode_sglist = NULL;
2774 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
2775 return 0;
2776}
2777
2778/**
2779 * ipr_store_update_fw - Update the firmware on the adapter
2780 * @class_dev: class_device struct
2781 * @buf: buffer
2782 * @count: buffer size
2783 *
2784 * This function will update the firmware on the adapter.
2785 *
2786 * Return value:
2787 * count on success / other on failure
2788 **/
2789static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2790 const char *buf, size_t count)
2791{
2792 struct Scsi_Host *shost = class_to_shost(class_dev);
2793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2794 struct ipr_ucode_image_header *image_hdr;
2795 const struct firmware *fw_entry;
2796 struct ipr_sglist *sglist;
1da177e4
LT
2797 char fname[100];
2798 char *src;
2799 int len, result, dnld_size;
2800
2801 if (!capable(CAP_SYS_ADMIN))
2802 return -EACCES;
2803
2804 len = snprintf(fname, 99, "%s", buf);
2805 fname[len-1] = '\0';
2806
2807 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2808 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2809 return -EIO;
2810 }
2811
2812 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2813
2814 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2815 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2816 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2817 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2818 release_firmware(fw_entry);
2819 return -EINVAL;
2820 }
2821
2822 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2823 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2824 sglist = ipr_alloc_ucode_buffer(dnld_size);
2825
2826 if (!sglist) {
2827 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2828 release_firmware(fw_entry);
2829 return -ENOMEM;
2830 }
2831
2832 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2833
2834 if (result) {
2835 dev_err(&ioa_cfg->pdev->dev,
2836 "Microcode buffer copy to DMA buffer failed\n");
12baa420 2837 goto out;
1da177e4
LT
2838 }
2839
12baa420 2840 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 2841
12baa420
BK
2842 if (!result)
2843 result = count;
2844out:
1da177e4
LT
2845 ipr_free_ucode_buffer(sglist);
2846 release_firmware(fw_entry);
12baa420 2847 return result;
1da177e4
LT
2848}
2849
2850static struct class_device_attribute ipr_update_fw_attr = {
2851 .attr = {
2852 .name = "update_fw",
2853 .mode = S_IWUSR,
2854 },
2855 .store = ipr_store_update_fw
2856};
2857
2858static struct class_device_attribute *ipr_ioa_attrs[] = {
2859 &ipr_fw_version_attr,
2860 &ipr_log_level_attr,
2861 &ipr_diagnostics_attr,
f37eb54b 2862 &ipr_ioa_state_attr,
1da177e4
LT
2863 &ipr_ioa_reset_attr,
2864 &ipr_update_fw_attr,
62275040 2865 &ipr_ioa_cache_attr,
1da177e4
LT
2866 NULL,
2867};
2868
2869#ifdef CONFIG_SCSI_IPR_DUMP
2870/**
2871 * ipr_read_dump - Dump the adapter
2872 * @kobj: kobject struct
2873 * @buf: buffer
2874 * @off: offset
2875 * @count: buffer size
2876 *
2877 * Return value:
2878 * number of bytes printed to buffer
2879 **/
2880static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2881 loff_t off, size_t count)
2882{
2883 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2884 struct Scsi_Host *shost = class_to_shost(cdev);
2885 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2886 struct ipr_dump *dump;
2887 unsigned long lock_flags = 0;
2888 char *src;
2889 int len;
2890 size_t rc = count;
2891
2892 if (!capable(CAP_SYS_ADMIN))
2893 return -EACCES;
2894
2895 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2896 dump = ioa_cfg->dump;
2897
2898 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900 return 0;
2901 }
2902 kref_get(&dump->kref);
2903 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2904
2905 if (off > dump->driver_dump.hdr.len) {
2906 kref_put(&dump->kref, ipr_release_dump);
2907 return 0;
2908 }
2909
2910 if (off + count > dump->driver_dump.hdr.len) {
2911 count = dump->driver_dump.hdr.len - off;
2912 rc = count;
2913 }
2914
2915 if (count && off < sizeof(dump->driver_dump)) {
2916 if (off + count > sizeof(dump->driver_dump))
2917 len = sizeof(dump->driver_dump) - off;
2918 else
2919 len = count;
2920 src = (u8 *)&dump->driver_dump + off;
2921 memcpy(buf, src, len);
2922 buf += len;
2923 off += len;
2924 count -= len;
2925 }
2926
2927 off -= sizeof(dump->driver_dump);
2928
2929 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2930 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2931 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2932 else
2933 len = count;
2934 src = (u8 *)&dump->ioa_dump + off;
2935 memcpy(buf, src, len);
2936 buf += len;
2937 off += len;
2938 count -= len;
2939 }
2940
2941 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2942
2943 while (count) {
2944 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2945 len = PAGE_ALIGN(off) - off;
2946 else
2947 len = count;
2948 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2949 src += off & ~PAGE_MASK;
2950 memcpy(buf, src, len);
2951 buf += len;
2952 off += len;
2953 count -= len;
2954 }
2955
2956 kref_put(&dump->kref, ipr_release_dump);
2957 return rc;
2958}
2959
2960/**
2961 * ipr_alloc_dump - Prepare for adapter dump
2962 * @ioa_cfg: ioa config struct
2963 *
2964 * Return value:
2965 * 0 on success / other on failure
2966 **/
2967static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2968{
2969 struct ipr_dump *dump;
2970 unsigned long lock_flags = 0;
2971
0bc42e35 2972 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
2973
2974 if (!dump) {
2975 ipr_err("Dump memory allocation failed\n");
2976 return -ENOMEM;
2977 }
2978
1da177e4
LT
2979 kref_init(&dump->kref);
2980 dump->ioa_cfg = ioa_cfg;
2981
2982 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2983
2984 if (INACTIVE != ioa_cfg->sdt_state) {
2985 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2986 kfree(dump);
2987 return 0;
2988 }
2989
2990 ioa_cfg->dump = dump;
2991 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2992 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2993 ioa_cfg->dump_taken = 1;
2994 schedule_work(&ioa_cfg->work_q);
2995 }
2996 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2997
1da177e4
LT
2998 return 0;
2999}
3000
3001/**
3002 * ipr_free_dump - Free adapter dump memory
3003 * @ioa_cfg: ioa config struct
3004 *
3005 * Return value:
3006 * 0 on success / other on failure
3007 **/
3008static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3009{
3010 struct ipr_dump *dump;
3011 unsigned long lock_flags = 0;
3012
3013 ENTER;
3014
3015 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3016 dump = ioa_cfg->dump;
3017 if (!dump) {
3018 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3019 return 0;
3020 }
3021
3022 ioa_cfg->dump = NULL;
3023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3024
3025 kref_put(&dump->kref, ipr_release_dump);
3026
3027 LEAVE;
3028 return 0;
3029}
3030
3031/**
3032 * ipr_write_dump - Setup dump state of adapter
3033 * @kobj: kobject struct
3034 * @buf: buffer
3035 * @off: offset
3036 * @count: buffer size
3037 *
3038 * Return value:
3039 * number of bytes printed to buffer
3040 **/
3041static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3042 loff_t off, size_t count)
3043{
3044 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3045 struct Scsi_Host *shost = class_to_shost(cdev);
3046 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3047 int rc;
3048
3049 if (!capable(CAP_SYS_ADMIN))
3050 return -EACCES;
3051
3052 if (buf[0] == '1')
3053 rc = ipr_alloc_dump(ioa_cfg);
3054 else if (buf[0] == '0')
3055 rc = ipr_free_dump(ioa_cfg);
3056 else
3057 return -EINVAL;
3058
3059 if (rc)
3060 return rc;
3061 else
3062 return count;
3063}
3064
3065static struct bin_attribute ipr_dump_attr = {
3066 .attr = {
3067 .name = "dump",
3068 .mode = S_IRUSR | S_IWUSR,
3069 },
3070 .size = 0,
3071 .read = ipr_read_dump,
3072 .write = ipr_write_dump
3073};
3074#else
3075static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3076#endif
3077
3078/**
3079 * ipr_change_queue_depth - Change the device's queue depth
3080 * @sdev: scsi device struct
3081 * @qdepth: depth to set
3082 *
3083 * Return value:
3084 * actual depth set
3085 **/
3086static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3087{
35a39691
BK
3088 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3089 struct ipr_resource_entry *res;
3090 unsigned long lock_flags = 0;
3091
3092 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3093 res = (struct ipr_resource_entry *)sdev->hostdata;
3094
3095 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3096 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3097 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3098
1da177e4
LT
3099 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3100 return sdev->queue_depth;
3101}
3102
3103/**
3104 * ipr_change_queue_type - Change the device's queue type
3105 * @dsev: scsi device struct
3106 * @tag_type: type of tags to use
3107 *
3108 * Return value:
3109 * actual queue type set
3110 **/
3111static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3112{
3113 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3114 struct ipr_resource_entry *res;
3115 unsigned long lock_flags = 0;
3116
3117 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3118 res = (struct ipr_resource_entry *)sdev->hostdata;
3119
3120 if (res) {
3121 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3122 /*
3123 * We don't bother quiescing the device here since the
3124 * adapter firmware does it for us.
3125 */
3126 scsi_set_tag_type(sdev, tag_type);
3127
3128 if (tag_type)
3129 scsi_activate_tcq(sdev, sdev->queue_depth);
3130 else
3131 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3132 } else
3133 tag_type = 0;
3134 } else
3135 tag_type = 0;
3136
3137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3138 return tag_type;
3139}
3140
3141/**
3142 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3143 * @dev: device struct
3144 * @buf: buffer
3145 *
3146 * Return value:
3147 * number of bytes printed to buffer
3148 **/
10523b3b 3149static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
3150{
3151 struct scsi_device *sdev = to_scsi_device(dev);
3152 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3153 struct ipr_resource_entry *res;
3154 unsigned long lock_flags = 0;
3155 ssize_t len = -ENXIO;
3156
3157 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3158 res = (struct ipr_resource_entry *)sdev->hostdata;
3159 if (res)
3160 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3161 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3162 return len;
3163}
3164
3165static struct device_attribute ipr_adapter_handle_attr = {
3166 .attr = {
3167 .name = "adapter_handle",
3168 .mode = S_IRUSR,
3169 },
3170 .show = ipr_show_adapter_handle
3171};
3172
3173static struct device_attribute *ipr_dev_attrs[] = {
3174 &ipr_adapter_handle_attr,
3175 NULL,
3176};
3177
3178/**
3179 * ipr_biosparam - Return the HSC mapping
3180 * @sdev: scsi device struct
3181 * @block_device: block device pointer
3182 * @capacity: capacity of the device
3183 * @parm: Array containing returned HSC values.
3184 *
3185 * This function generates the HSC parms that fdisk uses.
3186 * We want to make sure we return something that places partitions
3187 * on 4k boundaries for best performance with the IOA.
3188 *
3189 * Return value:
3190 * 0 on success
3191 **/
3192static int ipr_biosparam(struct scsi_device *sdev,
3193 struct block_device *block_device,
3194 sector_t capacity, int *parm)
3195{
3196 int heads, sectors;
3197 sector_t cylinders;
3198
3199 heads = 128;
3200 sectors = 32;
3201
3202 cylinders = capacity;
3203 sector_div(cylinders, (128 * 32));
3204
3205 /* return result */
3206 parm[0] = heads;
3207 parm[1] = sectors;
3208 parm[2] = cylinders;
3209
3210 return 0;
3211}
3212
35a39691
BK
3213/**
3214 * ipr_find_starget - Find target based on bus/target.
3215 * @starget: scsi target struct
3216 *
3217 * Return value:
3218 * resource entry pointer if found / NULL if not found
3219 **/
3220static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3221{
3222 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3223 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3224 struct ipr_resource_entry *res;
3225
3226 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3227 if ((res->cfgte.res_addr.bus == starget->channel) &&
3228 (res->cfgte.res_addr.target == starget->id) &&
3229 (res->cfgte.res_addr.lun == 0)) {
3230 return res;
3231 }
3232 }
3233
3234 return NULL;
3235}
3236
3237static struct ata_port_info sata_port_info;
3238
3239/**
3240 * ipr_target_alloc - Prepare for commands to a SCSI target
3241 * @starget: scsi target struct
3242 *
3243 * If the device is a SATA device, this function allocates an
3244 * ATA port with libata, else it does nothing.
3245 *
3246 * Return value:
3247 * 0 on success / non-0 on failure
3248 **/
3249static int ipr_target_alloc(struct scsi_target *starget)
3250{
3251 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3252 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3253 struct ipr_sata_port *sata_port;
3254 struct ata_port *ap;
3255 struct ipr_resource_entry *res;
3256 unsigned long lock_flags;
3257
3258 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3259 res = ipr_find_starget(starget);
3260 starget->hostdata = NULL;
3261
3262 if (res && ipr_is_gata(res)) {
3263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3264 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3265 if (!sata_port)
3266 return -ENOMEM;
3267
3268 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3269 if (ap) {
3270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3271 sata_port->ioa_cfg = ioa_cfg;
3272 sata_port->ap = ap;
3273 sata_port->res = res;
3274
3275 res->sata_port = sata_port;
3276 ap->private_data = sata_port;
3277 starget->hostdata = sata_port;
3278 } else {
3279 kfree(sata_port);
3280 return -ENOMEM;
3281 }
3282 }
3283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3284
3285 return 0;
3286}
3287
3288/**
3289 * ipr_target_destroy - Destroy a SCSI target
3290 * @starget: scsi target struct
3291 *
3292 * If the device was a SATA device, this function frees the libata
3293 * ATA port, else it does nothing.
3294 *
3295 **/
3296static void ipr_target_destroy(struct scsi_target *starget)
3297{
3298 struct ipr_sata_port *sata_port = starget->hostdata;
3299
3300 if (sata_port) {
3301 starget->hostdata = NULL;
3302 ata_sas_port_destroy(sata_port->ap);
3303 kfree(sata_port);
3304 }
3305}
3306
3307/**
3308 * ipr_find_sdev - Find device based on bus/target/lun.
3309 * @sdev: scsi device struct
3310 *
3311 * Return value:
3312 * resource entry pointer if found / NULL if not found
3313 **/
3314static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3315{
3316 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3317 struct ipr_resource_entry *res;
3318
3319 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3320 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3321 (res->cfgte.res_addr.target == sdev->id) &&
3322 (res->cfgte.res_addr.lun == sdev->lun))
3323 return res;
3324 }
3325
3326 return NULL;
3327}
3328
1da177e4
LT
3329/**
3330 * ipr_slave_destroy - Unconfigure a SCSI device
3331 * @sdev: scsi device struct
3332 *
3333 * Return value:
3334 * nothing
3335 **/
3336static void ipr_slave_destroy(struct scsi_device *sdev)
3337{
3338 struct ipr_resource_entry *res;
3339 struct ipr_ioa_cfg *ioa_cfg;
3340 unsigned long lock_flags = 0;
3341
3342 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3343
3344 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3345 res = (struct ipr_resource_entry *) sdev->hostdata;
3346 if (res) {
35a39691
BK
3347 if (res->sata_port)
3348 ata_port_disable(res->sata_port->ap);
1da177e4
LT
3349 sdev->hostdata = NULL;
3350 res->sdev = NULL;
35a39691 3351 res->sata_port = NULL;
1da177e4
LT
3352 }
3353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3354}
3355
3356/**
3357 * ipr_slave_configure - Configure a SCSI device
3358 * @sdev: scsi device struct
3359 *
3360 * This function configures the specified scsi device.
3361 *
3362 * Return value:
3363 * 0 on success
3364 **/
3365static int ipr_slave_configure(struct scsi_device *sdev)
3366{
3367 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3368 struct ipr_resource_entry *res;
3369 unsigned long lock_flags = 0;
3370
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372 res = sdev->hostdata;
3373 if (res) {
3374 if (ipr_is_af_dasd_device(res))
3375 sdev->type = TYPE_RAID;
0726ce26 3376 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 3377 sdev->scsi_level = 4;
0726ce26
BK
3378 sdev->no_uld_attach = 1;
3379 }
1da177e4
LT
3380 if (ipr_is_vset_device(res)) {
3381 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3382 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3383 }
e4fbf44e 3384 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
1da177e4 3385 sdev->allow_restart = 1;
35a39691
BK
3386 if (ipr_is_gata(res) && res->sata_port) {
3387 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3388 ata_sas_slave_configure(sdev, res->sata_port->ap);
3389 } else {
3390 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3391 }
1da177e4
LT
3392 }
3393 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3394 return 0;
3395}
3396
35a39691
BK
3397/**
3398 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3399 * @sdev: scsi device struct
3400 *
3401 * This function initializes an ATA port so that future commands
3402 * sent through queuecommand will work.
3403 *
3404 * Return value:
3405 * 0 on success
3406 **/
3407static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3408{
3409 struct ipr_sata_port *sata_port = NULL;
3410 int rc = -ENXIO;
3411
3412 ENTER;
3413 if (sdev->sdev_target)
3414 sata_port = sdev->sdev_target->hostdata;
3415 if (sata_port)
3416 rc = ata_sas_port_init(sata_port->ap);
3417 if (rc)
3418 ipr_slave_destroy(sdev);
3419
3420 LEAVE;
3421 return rc;
3422}
3423
1da177e4
LT
3424/**
3425 * ipr_slave_alloc - Prepare for commands to a device.
3426 * @sdev: scsi device struct
3427 *
3428 * This function saves a pointer to the resource entry
3429 * in the scsi device struct if the device exists. We
3430 * can then use this pointer in ipr_queuecommand when
3431 * handling new commands.
3432 *
3433 * Return value:
692aebfc 3434 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
3435 **/
3436static int ipr_slave_alloc(struct scsi_device *sdev)
3437{
3438 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3439 struct ipr_resource_entry *res;
3440 unsigned long lock_flags;
692aebfc 3441 int rc = -ENXIO;
1da177e4
LT
3442
3443 sdev->hostdata = NULL;
3444
3445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3446
35a39691
BK
3447 res = ipr_find_sdev(sdev);
3448 if (res) {
3449 res->sdev = sdev;
3450 res->add_to_ml = 0;
3451 res->in_erp = 0;
3452 sdev->hostdata = res;
3453 if (!ipr_is_naca_model(res))
3454 res->needs_sync_complete = 1;
3455 rc = 0;
3456 if (ipr_is_gata(res)) {
3457 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3458 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
3459 }
3460 }
3461
3462 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3463
692aebfc 3464 return rc;
1da177e4
LT
3465}
3466
3467/**
3468 * ipr_eh_host_reset - Reset the host adapter
3469 * @scsi_cmd: scsi command struct
3470 *
3471 * Return value:
3472 * SUCCESS / FAILED
3473 **/
df0ae249 3474static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
3475{
3476 struct ipr_ioa_cfg *ioa_cfg;
3477 int rc;
3478
3479 ENTER;
3480 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3481
3482 dev_err(&ioa_cfg->pdev->dev,
3483 "Adapter being reset as a result of error recovery.\n");
3484
3485 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3486 ioa_cfg->sdt_state = GET_DUMP;
3487
3488 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3489
3490 LEAVE;
3491 return rc;
3492}
3493
df0ae249
JG
3494static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3495{
3496 int rc;
3497
3498 spin_lock_irq(cmd->device->host->host_lock);
3499 rc = __ipr_eh_host_reset(cmd);
3500 spin_unlock_irq(cmd->device->host->host_lock);
3501
3502 return rc;
3503}
3504
c6513096
BK
3505/**
3506 * ipr_device_reset - Reset the device
3507 * @ioa_cfg: ioa config struct
3508 * @res: resource entry struct
3509 *
3510 * This function issues a device reset to the affected device.
3511 * If the device is a SCSI device, a LUN reset will be sent
3512 * to the device first. If that does not work, a target reset
35a39691
BK
3513 * will be sent. If the device is a SATA device, a PHY reset will
3514 * be sent.
c6513096
BK
3515 *
3516 * Return value:
3517 * 0 on success / non-zero on failure
3518 **/
3519static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3520 struct ipr_resource_entry *res)
3521{
3522 struct ipr_cmnd *ipr_cmd;
3523 struct ipr_ioarcb *ioarcb;
3524 struct ipr_cmd_pkt *cmd_pkt;
35a39691 3525 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
3526 u32 ioasc;
3527
3528 ENTER;
3529 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3530 ioarcb = &ipr_cmd->ioarcb;
3531 cmd_pkt = &ioarcb->cmd_pkt;
35a39691 3532 regs = &ioarcb->add_data.u.regs;
c6513096
BK
3533
3534 ioarcb->res_handle = res->cfgte.res_handle;
3535 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3536 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
3537 if (ipr_is_gata(res)) {
3538 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3539 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3540 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3541 }
c6513096
BK
3542
3543 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3544 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3545 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
35a39691
BK
3546 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3547 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3548 sizeof(struct ipr_ioasa_gata));
c6513096
BK
3549
3550 LEAVE;
3551 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3552}
3553
35a39691
BK
3554/**
3555 * ipr_sata_reset - Reset the SATA port
3556 * @ap: SATA port to reset
3557 * @classes: class of the attached device
3558 *
3559 * This function issues a SATA phy reset to the affected ATA port.
3560 *
3561 * Return value:
3562 * 0 on success / non-zero on failure
3563 **/
3564static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
3565{
3566 struct ipr_sata_port *sata_port = ap->private_data;
3567 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3568 struct ipr_resource_entry *res;
3569 unsigned long lock_flags = 0;
3570 int rc = -ENXIO;
3571
3572 ENTER;
3573 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
73d98ff0
BK
3574 while(ioa_cfg->in_reset_reload) {
3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3576 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3577 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3578 }
3579
35a39691
BK
3580 res = sata_port->res;
3581 if (res) {
3582 rc = ipr_device_reset(ioa_cfg, res);
3583 switch(res->cfgte.proto) {
3584 case IPR_PROTO_SATA:
3585 case IPR_PROTO_SAS_STP:
3586 *classes = ATA_DEV_ATA;
3587 break;
3588 case IPR_PROTO_SATA_ATAPI:
3589 case IPR_PROTO_SAS_STP_ATAPI:
3590 *classes = ATA_DEV_ATAPI;
3591 break;
3592 default:
3593 *classes = ATA_DEV_UNKNOWN;
3594 break;
3595 };
3596 }
3597
3598 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3599 LEAVE;
3600 return rc;
3601}
3602
1da177e4
LT
3603/**
3604 * ipr_eh_dev_reset - Reset the device
3605 * @scsi_cmd: scsi command struct
3606 *
3607 * This function issues a device reset to the affected device.
3608 * A LUN reset will be sent to the device first. If that does
3609 * not work, a target reset will be sent.
3610 *
3611 * Return value:
3612 * SUCCESS / FAILED
3613 **/
94d0e7b8 3614static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
3615{
3616 struct ipr_cmnd *ipr_cmd;
3617 struct ipr_ioa_cfg *ioa_cfg;
3618 struct ipr_resource_entry *res;
35a39691
BK
3619 struct ata_port *ap;
3620 int rc = 0;
1da177e4
LT
3621
3622 ENTER;
3623 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3624 res = scsi_cmd->device->hostdata;
3625
eeb88307 3626 if (!res)
1da177e4
LT
3627 return FAILED;
3628
3629 /*
3630 * If we are currently going through reset/reload, return failed. This will force the
3631 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3632 * reset to complete
3633 */
3634 if (ioa_cfg->in_reset_reload)
3635 return FAILED;
3636 if (ioa_cfg->ioa_is_dead)
3637 return FAILED;
3638
3639 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3640 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3641 if (ipr_cmd->scsi_cmd)
3642 ipr_cmd->done = ipr_scsi_eh_done;
7402ecef
BK
3643 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3644 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3645 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3646 }
1da177e4
LT
3647 }
3648 }
3649
3650 res->resetting_device = 1;
fb3ed3cb 3651 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
3652
3653 if (ipr_is_gata(res) && res->sata_port) {
3654 ap = res->sata_port->ap;
3655 spin_unlock_irq(scsi_cmd->device->host->host_lock);
3656 ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3657 spin_lock_irq(scsi_cmd->device->host->host_lock);
3658 } else
3659 rc = ipr_device_reset(ioa_cfg, res);
1da177e4
LT
3660 res->resetting_device = 0;
3661
1da177e4 3662 LEAVE;
c6513096 3663 return (rc ? FAILED : SUCCESS);
1da177e4
LT
3664}
3665
94d0e7b8
JG
3666static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3667{
3668 int rc;
3669
3670 spin_lock_irq(cmd->device->host->host_lock);
3671 rc = __ipr_eh_dev_reset(cmd);
3672 spin_unlock_irq(cmd->device->host->host_lock);
3673
3674 return rc;
3675}
3676
1da177e4
LT
3677/**
3678 * ipr_bus_reset_done - Op done function for bus reset.
3679 * @ipr_cmd: ipr command struct
3680 *
3681 * This function is the op done function for a bus reset
3682 *
3683 * Return value:
3684 * none
3685 **/
3686static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3687{
3688 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3689 struct ipr_resource_entry *res;
3690
3691 ENTER;
3692 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3693 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3694 sizeof(res->cfgte.res_handle))) {
3695 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3696 break;
3697 }
3698 }
3699
3700 /*
3701 * If abort has not completed, indicate the reset has, else call the
3702 * abort's done function to wake the sleeping eh thread
3703 */
3704 if (ipr_cmd->sibling->sibling)
3705 ipr_cmd->sibling->sibling = NULL;
3706 else
3707 ipr_cmd->sibling->done(ipr_cmd->sibling);
3708
3709 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3710 LEAVE;
3711}
3712
3713/**
3714 * ipr_abort_timeout - An abort task has timed out
3715 * @ipr_cmd: ipr command struct
3716 *
3717 * This function handles when an abort task times out. If this
3718 * happens we issue a bus reset since we have resources tied
3719 * up that must be freed before returning to the midlayer.
3720 *
3721 * Return value:
3722 * none
3723 **/
3724static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3725{
3726 struct ipr_cmnd *reset_cmd;
3727 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3728 struct ipr_cmd_pkt *cmd_pkt;
3729 unsigned long lock_flags = 0;
3730
3731 ENTER;
3732 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3733 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3734 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3735 return;
3736 }
3737
fb3ed3cb 3738 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
3739 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3740 ipr_cmd->sibling = reset_cmd;
3741 reset_cmd->sibling = ipr_cmd;
3742 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3743 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3744 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3745 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3746 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3747
3748 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3749 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3750 LEAVE;
3751}
3752
3753/**
3754 * ipr_cancel_op - Cancel specified op
3755 * @scsi_cmd: scsi command struct
3756 *
3757 * This function cancels specified op.
3758 *
3759 * Return value:
3760 * SUCCESS / FAILED
3761 **/
3762static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3763{
3764 struct ipr_cmnd *ipr_cmd;
3765 struct ipr_ioa_cfg *ioa_cfg;
3766 struct ipr_resource_entry *res;
3767 struct ipr_cmd_pkt *cmd_pkt;
3768 u32 ioasc;
3769 int op_found = 0;
3770
3771 ENTER;
3772 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3773 res = scsi_cmd->device->hostdata;
3774
8fa728a2
JG
3775 /* If we are currently going through reset/reload, return failed.
3776 * This will force the mid-layer to call ipr_eh_host_reset,
3777 * which will then go to sleep and wait for the reset to complete
3778 */
3779 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3780 return FAILED;
04d9768f 3781 if (!res || !ipr_is_gscsi(res))
1da177e4
LT
3782 return FAILED;
3783
3784 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3785 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3786 ipr_cmd->done = ipr_scsi_eh_done;
3787 op_found = 1;
3788 break;
3789 }
3790 }
3791
3792 if (!op_found)
3793 return SUCCESS;
3794
3795 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3796 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3797 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3798 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3799 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3800 ipr_cmd->u.sdev = scsi_cmd->device;
3801
fb3ed3cb
BK
3802 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3803 scsi_cmd->cmnd[0]);
1da177e4
LT
3804 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3805 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3806
3807 /*
3808 * If the abort task timed out and we sent a bus reset, we will get
3809 * one the following responses to the abort
3810 */
3811 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3812 ioasc = 0;
3813 ipr_trace;
3814 }
3815
3816 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ee0a90fa
BK
3817 if (!ipr_is_naca_model(res))
3818 res->needs_sync_complete = 1;
1da177e4
LT
3819
3820 LEAVE;
3821 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3822}
3823
3824/**
3825 * ipr_eh_abort - Abort a single op
3826 * @scsi_cmd: scsi command struct
3827 *
3828 * Return value:
3829 * SUCCESS / FAILED
3830 **/
3831static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3832{
8fa728a2
JG
3833 unsigned long flags;
3834 int rc;
1da177e4
LT
3835
3836 ENTER;
1da177e4 3837
8fa728a2
JG
3838 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3839 rc = ipr_cancel_op(scsi_cmd);
3840 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
3841
3842 LEAVE;
8fa728a2 3843 return rc;
1da177e4
LT
3844}
3845
3846/**
3847 * ipr_handle_other_interrupt - Handle "other" interrupts
3848 * @ioa_cfg: ioa config struct
3849 * @int_reg: interrupt register
3850 *
3851 * Return value:
3852 * IRQ_NONE / IRQ_HANDLED
3853 **/
3854static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3855 volatile u32 int_reg)
3856{
3857 irqreturn_t rc = IRQ_HANDLED;
3858
3859 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3860 /* Mask the interrupt */
3861 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3862
3863 /* Clear the interrupt */
3864 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3865 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3866
3867 list_del(&ioa_cfg->reset_cmd->queue);
3868 del_timer(&ioa_cfg->reset_cmd->timer);
3869 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3870 } else {
3871 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3872 ioa_cfg->ioa_unit_checked = 1;
3873 else
3874 dev_err(&ioa_cfg->pdev->dev,
3875 "Permanent IOA failure. 0x%08X\n", int_reg);
3876
3877 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3878 ioa_cfg->sdt_state = GET_DUMP;
3879
3880 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3881 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3882 }
3883
3884 return rc;
3885}
3886
3887/**
3888 * ipr_isr - Interrupt service routine
3889 * @irq: irq number
3890 * @devp: pointer to ioa config struct
1da177e4
LT
3891 *
3892 * Return value:
3893 * IRQ_NONE / IRQ_HANDLED
3894 **/
7d12e780 3895static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4
LT
3896{
3897 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3898 unsigned long lock_flags = 0;
3899 volatile u32 int_reg, int_mask_reg;
3900 u32 ioasc;
3901 u16 cmd_index;
3902 struct ipr_cmnd *ipr_cmd;
3903 irqreturn_t rc = IRQ_NONE;
3904
3905 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3906
3907 /* If interrupts are disabled, ignore the interrupt */
3908 if (!ioa_cfg->allow_interrupts) {
3909 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3910 return IRQ_NONE;
3911 }
3912
3913 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3914 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3915
3916 /* If an interrupt on the adapter did not occur, ignore it */
3917 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3918 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3919 return IRQ_NONE;
3920 }
3921
3922 while (1) {
3923 ipr_cmd = NULL;
3924
3925 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3926 ioa_cfg->toggle_bit) {
3927
3928 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3929 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3930
3931 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3932 ioa_cfg->errors_logged++;
3933 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3934
3935 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3936 ioa_cfg->sdt_state = GET_DUMP;
3937
3938 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940 return IRQ_HANDLED;
3941 }
3942
3943 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3944
3945 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3946
3947 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3948
3949 list_del(&ipr_cmd->queue);
3950 del_timer(&ipr_cmd->timer);
3951 ipr_cmd->done(ipr_cmd);
3952
3953 rc = IRQ_HANDLED;
3954
3955 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3956 ioa_cfg->hrrq_curr++;
3957 } else {
3958 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3959 ioa_cfg->toggle_bit ^= 1u;
3960 }
3961 }
3962
3963 if (ipr_cmd != NULL) {
3964 /* Clear the PCI interrupt */
3965 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3966 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3967 } else
3968 break;
3969 }
3970
3971 if (unlikely(rc == IRQ_NONE))
3972 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3973
3974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3975 return rc;
3976}
3977
3978/**
3979 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3980 * @ioa_cfg: ioa config struct
3981 * @ipr_cmd: ipr command struct
3982 *
3983 * Return value:
3984 * 0 on success / -1 on failure
3985 **/
3986static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3987 struct ipr_cmnd *ipr_cmd)
3988{
3989 int i;
3990 struct scatterlist *sglist;
3991 u32 length;
3992 u32 ioadl_flags = 0;
3993 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3994 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3995 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3996
3997 length = scsi_cmd->request_bufflen;
3998
3999 if (length == 0)
4000 return 0;
4001
4002 if (scsi_cmd->use_sg) {
4003 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4004 scsi_cmd->request_buffer,
4005 scsi_cmd->use_sg,
4006 scsi_cmd->sc_data_direction);
4007
4008 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4009 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4010 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4011 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4012 ioarcb->write_ioadl_len =
4013 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4014 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4015 ioadl_flags = IPR_IOADL_FLAGS_READ;
4016 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4017 ioarcb->read_ioadl_len =
4018 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4019 }
4020
4021 sglist = scsi_cmd->request_buffer;
4022
4023 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4024 ioadl[i].flags_and_data_len =
4025 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4026 ioadl[i].address =
4027 cpu_to_be32(sg_dma_address(&sglist[i]));
4028 }
4029
4030 if (likely(ipr_cmd->dma_use_sg)) {
4031 ioadl[i-1].flags_and_data_len |=
4032 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4033 return 0;
4034 } else
4035 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4036 } else {
4037 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4038 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4039 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4040 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4041 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4042 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4043 ioadl_flags = IPR_IOADL_FLAGS_READ;
4044 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4045 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4046 }
4047
4048 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4049 scsi_cmd->request_buffer, length,
4050 scsi_cmd->sc_data_direction);
4051
4052 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4053 ipr_cmd->dma_use_sg = 1;
4054 ioadl[0].flags_and_data_len =
4055 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4056 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4057 return 0;
4058 } else
4059 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4060 }
4061
4062 return -1;
4063}
4064
4065/**
4066 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4067 * @scsi_cmd: scsi command struct
4068 *
4069 * Return value:
4070 * task attributes
4071 **/
4072static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4073{
4074 u8 tag[2];
4075 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4076
4077 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4078 switch (tag[0]) {
4079 case MSG_SIMPLE_TAG:
4080 rc = IPR_FLAGS_LO_SIMPLE_TASK;
4081 break;
4082 case MSG_HEAD_TAG:
4083 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4084 break;
4085 case MSG_ORDERED_TAG:
4086 rc = IPR_FLAGS_LO_ORDERED_TASK;
4087 break;
4088 };
4089 }
4090
4091 return rc;
4092}
4093
4094/**
4095 * ipr_erp_done - Process completion of ERP for a device
4096 * @ipr_cmd: ipr command struct
4097 *
4098 * This function copies the sense buffer into the scsi_cmd
4099 * struct and pushes the scsi_done function.
4100 *
4101 * Return value:
4102 * nothing
4103 **/
4104static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4105{
4106 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4107 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4108 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4109 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4110
4111 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4112 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
4113 scmd_printk(KERN_ERR, scsi_cmd,
4114 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
4115 } else {
4116 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4117 SCSI_SENSE_BUFFERSIZE);
4118 }
4119
4120 if (res) {
ee0a90fa
BK
4121 if (!ipr_is_naca_model(res))
4122 res->needs_sync_complete = 1;
1da177e4
LT
4123 res->in_erp = 0;
4124 }
4125 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4126 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4127 scsi_cmd->scsi_done(scsi_cmd);
4128}
4129
4130/**
4131 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4132 * @ipr_cmd: ipr command struct
4133 *
4134 * Return value:
4135 * none
4136 **/
4137static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4138{
4139 struct ipr_ioarcb *ioarcb;
4140 struct ipr_ioasa *ioasa;
4141
4142 ioarcb = &ipr_cmd->ioarcb;
4143 ioasa = &ipr_cmd->ioasa;
4144
4145 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4146 ioarcb->write_data_transfer_length = 0;
4147 ioarcb->read_data_transfer_length = 0;
4148 ioarcb->write_ioadl_len = 0;
4149 ioarcb->read_ioadl_len = 0;
4150 ioasa->ioasc = 0;
4151 ioasa->residual_data_len = 0;
4152}
4153
4154/**
4155 * ipr_erp_request_sense - Send request sense to a device
4156 * @ipr_cmd: ipr command struct
4157 *
4158 * This function sends a request sense to a device as a result
4159 * of a check condition.
4160 *
4161 * Return value:
4162 * nothing
4163 **/
4164static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4165{
4166 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4167 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4168
4169 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4170 ipr_erp_done(ipr_cmd);
4171 return;
4172 }
4173
4174 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4175
4176 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4177 cmd_pkt->cdb[0] = REQUEST_SENSE;
4178 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4179 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4180 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4181 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4182
4183 ipr_cmd->ioadl[0].flags_and_data_len =
4184 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4185 ipr_cmd->ioadl[0].address =
4186 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4187
4188 ipr_cmd->ioarcb.read_ioadl_len =
4189 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4190 ipr_cmd->ioarcb.read_data_transfer_length =
4191 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4192
4193 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4194 IPR_REQUEST_SENSE_TIMEOUT * 2);
4195}
4196
4197/**
4198 * ipr_erp_cancel_all - Send cancel all to a device
4199 * @ipr_cmd: ipr command struct
4200 *
4201 * This function sends a cancel all to a device to clear the
4202 * queue. If we are running TCQ on the device, QERR is set to 1,
4203 * which means all outstanding ops have been dropped on the floor.
4204 * Cancel all will return them to us.
4205 *
4206 * Return value:
4207 * nothing
4208 **/
4209static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4210{
4211 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4212 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4213 struct ipr_cmd_pkt *cmd_pkt;
4214
4215 res->in_erp = 1;
4216
4217 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4218
4219 if (!scsi_get_tag_type(scsi_cmd->device)) {
4220 ipr_erp_request_sense(ipr_cmd);
4221 return;
4222 }
4223
4224 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4225 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4226 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4227
4228 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4229 IPR_CANCEL_ALL_TIMEOUT);
4230}
4231
4232/**
4233 * ipr_dump_ioasa - Dump contents of IOASA
4234 * @ioa_cfg: ioa config struct
4235 * @ipr_cmd: ipr command struct
fe964d0a 4236 * @res: resource entry struct
1da177e4
LT
4237 *
4238 * This function is invoked by the interrupt handler when ops
4239 * fail. It will log the IOASA if appropriate. Only called
4240 * for GPDD ops.
4241 *
4242 * Return value:
4243 * none
4244 **/
4245static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 4246 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
4247{
4248 int i;
4249 u16 data_len;
4250 u32 ioasc;
4251 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4252 __be32 *ioasa_data = (__be32 *)ioasa;
4253 int error_index;
4254
4255 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4256
4257 if (0 == ioasc)
4258 return;
4259
4260 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4261 return;
4262
4263 error_index = ipr_get_error(ioasc);
4264
4265 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4266 /* Don't log an error if the IOA already logged one */
4267 if (ioasa->ilid != 0)
4268 return;
4269
4270 if (ipr_error_table[error_index].log_ioasa == 0)
4271 return;
4272 }
4273
fe964d0a 4274 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
4275
4276 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4277 data_len = sizeof(struct ipr_ioasa);
4278 else
4279 data_len = be16_to_cpu(ioasa->ret_stat_len);
4280
4281 ipr_err("IOASA Dump:\n");
4282
4283 for (i = 0; i < data_len / 4; i += 4) {
4284 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4285 be32_to_cpu(ioasa_data[i]),
4286 be32_to_cpu(ioasa_data[i+1]),
4287 be32_to_cpu(ioasa_data[i+2]),
4288 be32_to_cpu(ioasa_data[i+3]));
4289 }
4290}
4291
4292/**
4293 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4294 * @ioasa: IOASA
4295 * @sense_buf: sense data buffer
4296 *
4297 * Return value:
4298 * none
4299 **/
4300static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4301{
4302 u32 failing_lba;
4303 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4304 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4305 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4306 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4307
4308 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4309
4310 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4311 return;
4312
4313 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4314
4315 if (ipr_is_vset_device(res) &&
4316 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4317 ioasa->u.vset.failing_lba_hi != 0) {
4318 sense_buf[0] = 0x72;
4319 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4320 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4321 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4322
4323 sense_buf[7] = 12;
4324 sense_buf[8] = 0;
4325 sense_buf[9] = 0x0A;
4326 sense_buf[10] = 0x80;
4327
4328 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4329
4330 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4331 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4332 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4333 sense_buf[15] = failing_lba & 0x000000ff;
4334
4335 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4336
4337 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4338 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4339 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4340 sense_buf[19] = failing_lba & 0x000000ff;
4341 } else {
4342 sense_buf[0] = 0x70;
4343 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4344 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4345 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4346
4347 /* Illegal request */
4348 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4349 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4350 sense_buf[7] = 10; /* additional length */
4351
4352 /* IOARCB was in error */
4353 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4354 sense_buf[15] = 0xC0;
4355 else /* Parameter data was invalid */
4356 sense_buf[15] = 0x80;
4357
4358 sense_buf[16] =
4359 ((IPR_FIELD_POINTER_MASK &
4360 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4361 sense_buf[17] =
4362 (IPR_FIELD_POINTER_MASK &
4363 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4364 } else {
4365 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4366 if (ipr_is_vset_device(res))
4367 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4368 else
4369 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4370
4371 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4372 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4373 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4374 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4375 sense_buf[6] = failing_lba & 0x000000ff;
4376 }
4377
4378 sense_buf[7] = 6; /* additional length */
4379 }
4380 }
4381}
4382
ee0a90fa
BK
4383/**
4384 * ipr_get_autosense - Copy autosense data to sense buffer
4385 * @ipr_cmd: ipr command struct
4386 *
4387 * This function copies the autosense buffer to the buffer
4388 * in the scsi_cmd, if there is autosense available.
4389 *
4390 * Return value:
4391 * 1 if autosense was available / 0 if not
4392 **/
4393static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4394{
4395 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4396
117d2ce1 4397 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa
BK
4398 return 0;
4399
4400 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4401 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4402 SCSI_SENSE_BUFFERSIZE));
4403 return 1;
4404}
4405
1da177e4
LT
4406/**
4407 * ipr_erp_start - Process an error response for a SCSI op
4408 * @ioa_cfg: ioa config struct
4409 * @ipr_cmd: ipr command struct
4410 *
4411 * This function determines whether or not to initiate ERP
4412 * on the affected device.
4413 *
4414 * Return value:
4415 * nothing
4416 **/
4417static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4418 struct ipr_cmnd *ipr_cmd)
4419{
4420 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4421 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4422 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4423
4424 if (!res) {
4425 ipr_scsi_eh_done(ipr_cmd);
4426 return;
4427 }
4428
4429 if (ipr_is_gscsi(res))
fe964d0a 4430 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
1da177e4
LT
4431 else
4432 ipr_gen_sense(ipr_cmd);
4433
4434 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4435 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa
BK
4436 if (ipr_is_naca_model(res))
4437 scsi_cmd->result |= (DID_ABORT << 16);
4438 else
4439 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
4440 break;
4441 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 4442 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
4443 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4444 break;
4445 case IPR_IOASC_HW_SEL_TIMEOUT:
4446 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa
BK
4447 if (!ipr_is_naca_model(res))
4448 res->needs_sync_complete = 1;
1da177e4
LT
4449 break;
4450 case IPR_IOASC_SYNC_REQUIRED:
4451 if (!res->in_erp)
4452 res->needs_sync_complete = 1;
4453 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4454 break;
4455 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 4456 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
4457 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4458 break;
4459 case IPR_IOASC_BUS_WAS_RESET:
4460 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4461 /*
4462 * Report the bus reset and ask for a retry. The device
4463 * will give CC/UA the next command.
4464 */
4465 if (!res->resetting_device)
4466 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4467 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa
BK
4468 if (!ipr_is_naca_model(res))
4469 res->needs_sync_complete = 1;
1da177e4
LT
4470 break;
4471 case IPR_IOASC_HW_DEV_BUS_STATUS:
4472 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4473 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa
BK
4474 if (!ipr_get_autosense(ipr_cmd)) {
4475 if (!ipr_is_naca_model(res)) {
4476 ipr_erp_cancel_all(ipr_cmd);
4477 return;
4478 }
4479 }
1da177e4 4480 }
ee0a90fa
BK
4481 if (!ipr_is_naca_model(res))
4482 res->needs_sync_complete = 1;
1da177e4
LT
4483 break;
4484 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4485 break;
4486 default:
5b7304fb
BK
4487 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4488 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 4489 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
4490 res->needs_sync_complete = 1;
4491 break;
4492 }
4493
4494 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4495 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4496 scsi_cmd->scsi_done(scsi_cmd);
4497}
4498
4499/**
4500 * ipr_scsi_done - mid-layer done function
4501 * @ipr_cmd: ipr command struct
4502 *
4503 * This function is invoked by the interrupt handler for
4504 * ops generated by the SCSI mid-layer
4505 *
4506 * Return value:
4507 * none
4508 **/
4509static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4510{
4511 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4512 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4513 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4514
4515 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4516
4517 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4518 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4519 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4520 scsi_cmd->scsi_done(scsi_cmd);
4521 } else
4522 ipr_erp_start(ioa_cfg, ipr_cmd);
4523}
4524
1da177e4
LT
4525/**
4526 * ipr_queuecommand - Queue a mid-layer request
4527 * @scsi_cmd: scsi command struct
4528 * @done: done function
4529 *
4530 * This function queues a request generated by the mid-layer.
4531 *
4532 * Return value:
4533 * 0 on success
4534 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4535 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4536 **/
4537static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4538 void (*done) (struct scsi_cmnd *))
4539{
4540 struct ipr_ioa_cfg *ioa_cfg;
4541 struct ipr_resource_entry *res;
4542 struct ipr_ioarcb *ioarcb;
4543 struct ipr_cmnd *ipr_cmd;
4544 int rc = 0;
4545
4546 scsi_cmd->scsi_done = done;
4547 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4548 res = scsi_cmd->device->hostdata;
4549 scsi_cmd->result = (DID_OK << 16);
4550
4551 /*
4552 * We are currently blocking all devices due to a host reset
4553 * We have told the host to stop giving us new requests, but
4554 * ERP ops don't count. FIXME
4555 */
4556 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4557 return SCSI_MLQUEUE_HOST_BUSY;
4558
4559 /*
4560 * FIXME - Create scsi_set_host_offline interface
4561 * and the ioa_is_dead check can be removed
4562 */
4563 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4564 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4565 scsi_cmd->result = (DID_NO_CONNECT << 16);
4566 scsi_cmd->scsi_done(scsi_cmd);
4567 return 0;
4568 }
4569
35a39691
BK
4570 if (ipr_is_gata(res) && res->sata_port)
4571 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4572
1da177e4
LT
4573 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4574 ioarcb = &ipr_cmd->ioarcb;
4575 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4576
4577 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4578 ipr_cmd->scsi_cmd = scsi_cmd;
4579 ioarcb->res_handle = res->cfgte.res_handle;
4580 ipr_cmd->done = ipr_scsi_done;
4581 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4582
4583 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4584 if (scsi_cmd->underflow == 0)
4585 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4586
4587 if (res->needs_sync_complete) {
4588 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4589 res->needs_sync_complete = 0;
4590 }
4591
4592 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4593 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4594 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4595 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4596 }
4597
4598 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4599 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4600 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4601
1da177e4
LT
4602 if (likely(rc == 0))
4603 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4604
4605 if (likely(rc == 0)) {
4606 mb();
4607 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4608 ioa_cfg->regs.ioarrin_reg);
4609 } else {
4610 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4611 return SCSI_MLQUEUE_HOST_BUSY;
4612 }
4613
4614 return 0;
4615}
4616
35a39691
BK
4617/**
4618 * ipr_ioctl - IOCTL handler
4619 * @sdev: scsi device struct
4620 * @cmd: IOCTL cmd
4621 * @arg: IOCTL arg
4622 *
4623 * Return value:
4624 * 0 on success / other on failure
4625 **/
4626int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4627{
4628 struct ipr_resource_entry *res;
4629
4630 res = (struct ipr_resource_entry *)sdev->hostdata;
4631 if (res && ipr_is_gata(res))
4632 return ata_scsi_ioctl(sdev, cmd, arg);
4633
4634 return -EINVAL;
4635}
4636
1da177e4
LT
4637/**
4638 * ipr_info - Get information about the card/driver
4639 * @scsi_host: scsi host struct
4640 *
4641 * Return value:
4642 * pointer to buffer with description string
4643 **/
4644static const char * ipr_ioa_info(struct Scsi_Host *host)
4645{
4646 static char buffer[512];
4647 struct ipr_ioa_cfg *ioa_cfg;
4648 unsigned long lock_flags = 0;
4649
4650 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4651
4652 spin_lock_irqsave(host->host_lock, lock_flags);
4653 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4654 spin_unlock_irqrestore(host->host_lock, lock_flags);
4655
4656 return buffer;
4657}
4658
4659static struct scsi_host_template driver_template = {
4660 .module = THIS_MODULE,
4661 .name = "IPR",
4662 .info = ipr_ioa_info,
35a39691 4663 .ioctl = ipr_ioctl,
1da177e4
LT
4664 .queuecommand = ipr_queuecommand,
4665 .eh_abort_handler = ipr_eh_abort,
4666 .eh_device_reset_handler = ipr_eh_dev_reset,
4667 .eh_host_reset_handler = ipr_eh_host_reset,
4668 .slave_alloc = ipr_slave_alloc,
4669 .slave_configure = ipr_slave_configure,
4670 .slave_destroy = ipr_slave_destroy,
35a39691
BK
4671 .target_alloc = ipr_target_alloc,
4672 .target_destroy = ipr_target_destroy,
1da177e4
LT
4673 .change_queue_depth = ipr_change_queue_depth,
4674 .change_queue_type = ipr_change_queue_type,
4675 .bios_param = ipr_biosparam,
4676 .can_queue = IPR_MAX_COMMANDS,
4677 .this_id = -1,
4678 .sg_tablesize = IPR_MAX_SGLIST,
4679 .max_sectors = IPR_IOA_MAX_SECTORS,
4680 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4681 .use_clustering = ENABLE_CLUSTERING,
4682 .shost_attrs = ipr_ioa_attrs,
4683 .sdev_attrs = ipr_dev_attrs,
4684 .proc_name = IPR_NAME
4685};
4686
35a39691
BK
4687/**
4688 * ipr_ata_phy_reset - libata phy_reset handler
4689 * @ap: ata port to reset
4690 *
4691 **/
4692static void ipr_ata_phy_reset(struct ata_port *ap)
4693{
4694 unsigned long flags;
4695 struct ipr_sata_port *sata_port = ap->private_data;
4696 struct ipr_resource_entry *res = sata_port->res;
4697 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4698 int rc;
4699
4700 ENTER;
4701 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4702 while(ioa_cfg->in_reset_reload) {
4703 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4704 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4705 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4706 }
4707
4708 if (!ioa_cfg->allow_cmds)
4709 goto out_unlock;
4710
4711 rc = ipr_device_reset(ioa_cfg, res);
4712
4713 if (rc) {
4714 ap->ops->port_disable(ap);
4715 goto out_unlock;
4716 }
4717
4718 switch(res->cfgte.proto) {
4719 case IPR_PROTO_SATA:
4720 case IPR_PROTO_SAS_STP:
4721 ap->device[0].class = ATA_DEV_ATA;
4722 break;
4723 case IPR_PROTO_SATA_ATAPI:
4724 case IPR_PROTO_SAS_STP_ATAPI:
4725 ap->device[0].class = ATA_DEV_ATAPI;
4726 break;
4727 default:
4728 ap->device[0].class = ATA_DEV_UNKNOWN;
4729 ap->ops->port_disable(ap);
4730 break;
4731 };
4732
4733out_unlock:
4734 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4735 LEAVE;
4736}
4737
4738/**
4739 * ipr_ata_post_internal - Cleanup after an internal command
4740 * @qc: ATA queued command
4741 *
4742 * Return value:
4743 * none
4744 **/
4745static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4746{
4747 struct ipr_sata_port *sata_port = qc->ap->private_data;
4748 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4749 struct ipr_cmnd *ipr_cmd;
4750 unsigned long flags;
4751
4752 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
73d98ff0
BK
4753 while(ioa_cfg->in_reset_reload) {
4754 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4755 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4756 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4757 }
4758
35a39691
BK
4759 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4760 if (ipr_cmd->qc == qc) {
4761 ipr_device_reset(ioa_cfg, sata_port->res);
4762 break;
4763 }
4764 }
4765 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4766}
4767
4768/**
4769 * ipr_tf_read - Read the current ATA taskfile for the ATA port
4770 * @ap: ATA port
4771 * @tf: destination ATA taskfile
4772 *
4773 * Return value:
4774 * none
4775 **/
4776static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
4777{
4778 struct ipr_sata_port *sata_port = ap->private_data;
4779 struct ipr_ioasa_gata *g = &sata_port->ioasa;
4780
4781 tf->feature = g->error;
4782 tf->nsect = g->nsect;
4783 tf->lbal = g->lbal;
4784 tf->lbam = g->lbam;
4785 tf->lbah = g->lbah;
4786 tf->device = g->device;
4787 tf->command = g->status;
4788 tf->hob_nsect = g->hob_nsect;
4789 tf->hob_lbal = g->hob_lbal;
4790 tf->hob_lbam = g->hob_lbam;
4791 tf->hob_lbah = g->hob_lbah;
4792 tf->ctl = g->alt_status;
4793}
4794
4795/**
4796 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
4797 * @regs: destination
4798 * @tf: source ATA taskfile
4799 *
4800 * Return value:
4801 * none
4802 **/
4803static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
4804 struct ata_taskfile *tf)
4805{
4806 regs->feature = tf->feature;
4807 regs->nsect = tf->nsect;
4808 regs->lbal = tf->lbal;
4809 regs->lbam = tf->lbam;
4810 regs->lbah = tf->lbah;
4811 regs->device = tf->device;
4812 regs->command = tf->command;
4813 regs->hob_feature = tf->hob_feature;
4814 regs->hob_nsect = tf->hob_nsect;
4815 regs->hob_lbal = tf->hob_lbal;
4816 regs->hob_lbam = tf->hob_lbam;
4817 regs->hob_lbah = tf->hob_lbah;
4818 regs->ctl = tf->ctl;
4819}
4820
4821/**
4822 * ipr_sata_done - done function for SATA commands
4823 * @ipr_cmd: ipr command struct
4824 *
4825 * This function is invoked by the interrupt handler for
4826 * ops generated by the SCSI mid-layer to SATA devices
4827 *
4828 * Return value:
4829 * none
4830 **/
4831static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
4832{
4833 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4834 struct ata_queued_cmd *qc = ipr_cmd->qc;
4835 struct ipr_sata_port *sata_port = qc->ap->private_data;
4836 struct ipr_resource_entry *res = sata_port->res;
4837 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4838
4839 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
4840 sizeof(struct ipr_ioasa_gata));
4841 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4842
4843 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
4844 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
4845 res->cfgte.res_addr.target);
4846
4847 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4848 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
4849 else
4850 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
4851 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4852 ata_qc_complete(qc);
4853}
4854
4855/**
4856 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
4857 * @ipr_cmd: ipr command struct
4858 * @qc: ATA queued command
4859 *
4860 **/
4861static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
4862 struct ata_queued_cmd *qc)
4863{
4864 u32 ioadl_flags = 0;
4865 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4866 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4867 int len = qc->nbytes + qc->pad_len;
4868 struct scatterlist *sg;
4869
4870 if (len == 0)
4871 return;
4872
4873 if (qc->dma_dir == DMA_TO_DEVICE) {
4874 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4875 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4876 ioarcb->write_data_transfer_length = cpu_to_be32(len);
4877 ioarcb->write_ioadl_len =
4878 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4879 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
4880 ioadl_flags = IPR_IOADL_FLAGS_READ;
4881 ioarcb->read_data_transfer_length = cpu_to_be32(len);
4882 ioarcb->read_ioadl_len =
4883 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4884 }
4885
4886 ata_for_each_sg(sg, qc) {
4887 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4888 ioadl->address = cpu_to_be32(sg_dma_address(sg));
4889 if (ata_sg_is_last(sg, qc))
4890 ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4891 else
4892 ioadl++;
4893 }
4894}
4895
4896/**
4897 * ipr_qc_issue - Issue a SATA qc to a device
4898 * @qc: queued command
4899 *
4900 * Return value:
4901 * 0 if success
4902 **/
4903static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
4904{
4905 struct ata_port *ap = qc->ap;
4906 struct ipr_sata_port *sata_port = ap->private_data;
4907 struct ipr_resource_entry *res = sata_port->res;
4908 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4909 struct ipr_cmnd *ipr_cmd;
4910 struct ipr_ioarcb *ioarcb;
4911 struct ipr_ioarcb_ata_regs *regs;
4912
4913 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
4914 return -EIO;
4915
4916 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4917 ioarcb = &ipr_cmd->ioarcb;
4918 regs = &ioarcb->add_data.u.regs;
4919
4920 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
4921 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
4922
4923 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4924 ipr_cmd->qc = qc;
4925 ipr_cmd->done = ipr_sata_done;
4926 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4927 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
4928 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4929 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4930 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
4931
4932 ipr_build_ata_ioadl(ipr_cmd, qc);
4933 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4934 ipr_copy_sata_tf(regs, &qc->tf);
4935 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
4936 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4937
4938 switch (qc->tf.protocol) {
4939 case ATA_PROT_NODATA:
4940 case ATA_PROT_PIO:
4941 break;
4942
4943 case ATA_PROT_DMA:
4944 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
4945 break;
4946
4947 case ATA_PROT_ATAPI:
4948 case ATA_PROT_ATAPI_NODATA:
4949 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
4950 break;
4951
4952 case ATA_PROT_ATAPI_DMA:
4953 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
4954 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
4955 break;
4956
4957 default:
4958 WARN_ON(1);
4959 return -1;
4960 }
4961
4962 mb();
4963 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
4964 ioa_cfg->regs.ioarrin_reg);
4965 return 0;
4966}
4967
4968/**
4969 * ipr_ata_check_status - Return last ATA status
4970 * @ap: ATA port
4971 *
4972 * Return value:
4973 * ATA status
4974 **/
4975static u8 ipr_ata_check_status(struct ata_port *ap)
4976{
4977 struct ipr_sata_port *sata_port = ap->private_data;
4978 return sata_port->ioasa.status;
4979}
4980
4981/**
4982 * ipr_ata_check_altstatus - Return last ATA altstatus
4983 * @ap: ATA port
4984 *
4985 * Return value:
4986 * Alt ATA status
4987 **/
4988static u8 ipr_ata_check_altstatus(struct ata_port *ap)
4989{
4990 struct ipr_sata_port *sata_port = ap->private_data;
4991 return sata_port->ioasa.alt_status;
4992}
4993
4994static struct ata_port_operations ipr_sata_ops = {
4995 .port_disable = ata_port_disable,
4996 .check_status = ipr_ata_check_status,
4997 .check_altstatus = ipr_ata_check_altstatus,
4998 .dev_select = ata_noop_dev_select,
4999 .phy_reset = ipr_ata_phy_reset,
5000 .post_internal_cmd = ipr_ata_post_internal,
5001 .tf_read = ipr_tf_read,
5002 .qc_prep = ata_noop_qc_prep,
5003 .qc_issue = ipr_qc_issue,
5004 .port_start = ata_sas_port_start,
5005 .port_stop = ata_sas_port_stop
5006};
5007
5008static struct ata_port_info sata_port_info = {
5009 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5010 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5011 .pio_mask = 0x10, /* pio4 */
5012 .mwdma_mask = 0x07,
5013 .udma_mask = 0x7f, /* udma0-6 */
5014 .port_ops = &ipr_sata_ops
5015};
5016
1da177e4
LT
5017#ifdef CONFIG_PPC_PSERIES
5018static const u16 ipr_blocked_processors[] = {
5019 PV_NORTHSTAR,
5020 PV_PULSAR,
5021 PV_POWER4,
5022 PV_ICESTAR,
5023 PV_SSTAR,
5024 PV_POWER4p,
5025 PV_630,
5026 PV_630p
5027};
5028
5029/**
5030 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5031 * @ioa_cfg: ioa cfg struct
5032 *
5033 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5034 * certain pSeries hardware. This function determines if the given
5035 * adapter is in one of these confgurations or not.
5036 *
5037 * Return value:
5038 * 1 if adapter is not supported / 0 if adapter is supported
5039 **/
5040static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5041{
5042 u8 rev_id;
5043 int i;
5044
5045 if (ioa_cfg->type == 0x5702) {
5046 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5047 &rev_id) == PCIBIOS_SUCCESSFUL) {
5048 if (rev_id < 4) {
5049 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5050 if (__is_processor(ipr_blocked_processors[i]))
5051 return 1;
5052 }
5053 }
5054 }
5055 }
5056 return 0;
5057}
5058#else
5059#define ipr_invalid_adapter(ioa_cfg) 0
5060#endif
5061
5062/**
5063 * ipr_ioa_bringdown_done - IOA bring down completion.
5064 * @ipr_cmd: ipr command struct
5065 *
5066 * This function processes the completion of an adapter bring down.
5067 * It wakes any reset sleepers.
5068 *
5069 * Return value:
5070 * IPR_RC_JOB_RETURN
5071 **/
5072static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5073{
5074 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5075
5076 ENTER;
5077 ioa_cfg->in_reset_reload = 0;
5078 ioa_cfg->reset_retries = 0;
5079 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5080 wake_up_all(&ioa_cfg->reset_wait_q);
5081
5082 spin_unlock_irq(ioa_cfg->host->host_lock);
5083 scsi_unblock_requests(ioa_cfg->host);
5084 spin_lock_irq(ioa_cfg->host->host_lock);
5085 LEAVE;
5086
5087 return IPR_RC_JOB_RETURN;
5088}
5089
5090/**
5091 * ipr_ioa_reset_done - IOA reset completion.
5092 * @ipr_cmd: ipr command struct
5093 *
5094 * This function processes the completion of an adapter reset.
5095 * It schedules any necessary mid-layer add/removes and
5096 * wakes any reset sleepers.
5097 *
5098 * Return value:
5099 * IPR_RC_JOB_RETURN
5100 **/
5101static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5102{
5103 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5104 struct ipr_resource_entry *res;
5105 struct ipr_hostrcb *hostrcb, *temp;
5106 int i = 0;
5107
5108 ENTER;
5109 ioa_cfg->in_reset_reload = 0;
5110 ioa_cfg->allow_cmds = 1;
5111 ioa_cfg->reset_cmd = NULL;
3d1d0da6 5112 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
5113
5114 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5115 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5116 ipr_trace;
5117 break;
5118 }
5119 }
5120 schedule_work(&ioa_cfg->work_q);
5121
5122 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5123 list_del(&hostrcb->queue);
5124 if (i++ < IPR_NUM_LOG_HCAMS)
5125 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5126 else
5127 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5128 }
5129
5130 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5131
5132 ioa_cfg->reset_retries = 0;
5133 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5134 wake_up_all(&ioa_cfg->reset_wait_q);
5135
5136 spin_unlock_irq(ioa_cfg->host->host_lock);
5137 scsi_unblock_requests(ioa_cfg->host);
5138 spin_lock_irq(ioa_cfg->host->host_lock);
5139
5140 if (!ioa_cfg->allow_cmds)
5141 scsi_block_requests(ioa_cfg->host);
5142
5143 LEAVE;
5144 return IPR_RC_JOB_RETURN;
5145}
5146
5147/**
5148 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5149 * @supported_dev: supported device struct
5150 * @vpids: vendor product id struct
5151 *
5152 * Return value:
5153 * none
5154 **/
5155static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5156 struct ipr_std_inq_vpids *vpids)
5157{
5158 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5159 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5160 supported_dev->num_records = 1;
5161 supported_dev->data_length =
5162 cpu_to_be16(sizeof(struct ipr_supported_device));
5163 supported_dev->reserved = 0;
5164}
5165
5166/**
5167 * ipr_set_supported_devs - Send Set Supported Devices for a device
5168 * @ipr_cmd: ipr command struct
5169 *
5170 * This function send a Set Supported Devices to the adapter
5171 *
5172 * Return value:
5173 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5174 **/
5175static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5176{
5177 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5178 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5179 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5180 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5181 struct ipr_resource_entry *res = ipr_cmd->u.res;
5182
5183 ipr_cmd->job_step = ipr_ioa_reset_done;
5184
5185 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 5186 if (!ipr_is_scsi_disk(res))
1da177e4
LT
5187 continue;
5188
5189 ipr_cmd->u.res = res;
5190 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5191
5192 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5193 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5194 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5195
5196 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5197 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5198 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5199
5200 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5201 sizeof(struct ipr_supported_device));
5202 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5203 offsetof(struct ipr_misc_cbs, supp_dev));
5204 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5205 ioarcb->write_data_transfer_length =
5206 cpu_to_be32(sizeof(struct ipr_supported_device));
5207
5208 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5209 IPR_SET_SUP_DEVICE_TIMEOUT);
5210
5211 ipr_cmd->job_step = ipr_set_supported_devs;
5212 return IPR_RC_JOB_RETURN;
5213 }
5214
5215 return IPR_RC_JOB_CONTINUE;
5216}
5217
62275040
BK
5218/**
5219 * ipr_setup_write_cache - Disable write cache if needed
5220 * @ipr_cmd: ipr command struct
5221 *
5222 * This function sets up adapters write cache to desired setting
5223 *
5224 * Return value:
5225 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5226 **/
5227static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5228{
5229 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5230
5231 ipr_cmd->job_step = ipr_set_supported_devs;
5232 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5233 struct ipr_resource_entry, queue);
5234
5235 if (ioa_cfg->cache_state != CACHE_DISABLED)
5236 return IPR_RC_JOB_CONTINUE;
5237
5238 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5239 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5240 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5241 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5242
5243 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5244
5245 return IPR_RC_JOB_RETURN;
5246}
5247
1da177e4
LT
5248/**
5249 * ipr_get_mode_page - Locate specified mode page
5250 * @mode_pages: mode page buffer
5251 * @page_code: page code to find
5252 * @len: minimum required length for mode page
5253 *
5254 * Return value:
5255 * pointer to mode page / NULL on failure
5256 **/
5257static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5258 u32 page_code, u32 len)
5259{
5260 struct ipr_mode_page_hdr *mode_hdr;
5261 u32 page_length;
5262 u32 length;
5263
5264 if (!mode_pages || (mode_pages->hdr.length == 0))
5265 return NULL;
5266
5267 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5268 mode_hdr = (struct ipr_mode_page_hdr *)
5269 (mode_pages->data + mode_pages->hdr.block_desc_len);
5270
5271 while (length) {
5272 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5273 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5274 return mode_hdr;
5275 break;
5276 } else {
5277 page_length = (sizeof(struct ipr_mode_page_hdr) +
5278 mode_hdr->page_length);
5279 length -= page_length;
5280 mode_hdr = (struct ipr_mode_page_hdr *)
5281 ((unsigned long)mode_hdr + page_length);
5282 }
5283 }
5284 return NULL;
5285}
5286
5287/**
5288 * ipr_check_term_power - Check for term power errors
5289 * @ioa_cfg: ioa config struct
5290 * @mode_pages: IOAFP mode pages buffer
5291 *
5292 * Check the IOAFP's mode page 28 for term power errors
5293 *
5294 * Return value:
5295 * nothing
5296 **/
5297static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5298 struct ipr_mode_pages *mode_pages)
5299{
5300 int i;
5301 int entry_length;
5302 struct ipr_dev_bus_entry *bus;
5303 struct ipr_mode_page28 *mode_page;
5304
5305 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5306 sizeof(struct ipr_mode_page28));
5307
5308 entry_length = mode_page->entry_length;
5309
5310 bus = mode_page->bus;
5311
5312 for (i = 0; i < mode_page->num_entries; i++) {
5313 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5314 dev_err(&ioa_cfg->pdev->dev,
5315 "Term power is absent on scsi bus %d\n",
5316 bus->res_addr.bus);
5317 }
5318
5319 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5320 }
5321}
5322
5323/**
5324 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5325 * @ioa_cfg: ioa config struct
5326 *
5327 * Looks through the config table checking for SES devices. If
5328 * the SES device is in the SES table indicating a maximum SCSI
5329 * bus speed, the speed is limited for the bus.
5330 *
5331 * Return value:
5332 * none
5333 **/
5334static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5335{
5336 u32 max_xfer_rate;
5337 int i;
5338
5339 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5340 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5341 ioa_cfg->bus_attr[i].bus_width);
5342
5343 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5344 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5345 }
5346}
5347
5348/**
5349 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5350 * @ioa_cfg: ioa config struct
5351 * @mode_pages: mode page 28 buffer
5352 *
5353 * Updates mode page 28 based on driver configuration
5354 *
5355 * Return value:
5356 * none
5357 **/
5358static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5359 struct ipr_mode_pages *mode_pages)
5360{
5361 int i, entry_length;
5362 struct ipr_dev_bus_entry *bus;
5363 struct ipr_bus_attributes *bus_attr;
5364 struct ipr_mode_page28 *mode_page;
5365
5366 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5367 sizeof(struct ipr_mode_page28));
5368
5369 entry_length = mode_page->entry_length;
5370
5371 /* Loop for each device bus entry */
5372 for (i = 0, bus = mode_page->bus;
5373 i < mode_page->num_entries;
5374 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5375 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5376 dev_err(&ioa_cfg->pdev->dev,
5377 "Invalid resource address reported: 0x%08X\n",
5378 IPR_GET_PHYS_LOC(bus->res_addr));
5379 continue;
5380 }
5381
5382 bus_attr = &ioa_cfg->bus_attr[i];
5383 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5384 bus->bus_width = bus_attr->bus_width;
5385 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5386 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5387 if (bus_attr->qas_enabled)
5388 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5389 else
5390 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5391 }
5392}
5393
5394/**
5395 * ipr_build_mode_select - Build a mode select command
5396 * @ipr_cmd: ipr command struct
5397 * @res_handle: resource handle to send command to
5398 * @parm: Byte 2 of Mode Sense command
5399 * @dma_addr: DMA buffer address
5400 * @xfer_len: data transfer length
5401 *
5402 * Return value:
5403 * none
5404 **/
5405static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5406 __be32 res_handle, u8 parm, u32 dma_addr,
5407 u8 xfer_len)
5408{
5409 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5410 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5411
5412 ioarcb->res_handle = res_handle;
5413 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5414 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5415 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5416 ioarcb->cmd_pkt.cdb[1] = parm;
5417 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5418
5419 ioadl->flags_and_data_len =
5420 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5421 ioadl->address = cpu_to_be32(dma_addr);
5422 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5423 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5424}
5425
5426/**
5427 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5428 * @ipr_cmd: ipr command struct
5429 *
5430 * This function sets up the SCSI bus attributes and sends
5431 * a Mode Select for Page 28 to activate them.
5432 *
5433 * Return value:
5434 * IPR_RC_JOB_RETURN
5435 **/
5436static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5437{
5438 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5439 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5440 int length;
5441
5442 ENTER;
4733804c
BK
5443 ipr_scsi_bus_speed_limit(ioa_cfg);
5444 ipr_check_term_power(ioa_cfg, mode_pages);
5445 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5446 length = mode_pages->hdr.length + 1;
5447 mode_pages->hdr.length = 0;
1da177e4
LT
5448
5449 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5450 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5451 length);
5452
62275040 5453 ipr_cmd->job_step = ipr_setup_write_cache;
1da177e4
LT
5454 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5455
5456 LEAVE;
5457 return IPR_RC_JOB_RETURN;
5458}
5459
5460/**
5461 * ipr_build_mode_sense - Builds a mode sense command
5462 * @ipr_cmd: ipr command struct
5463 * @res: resource entry struct
5464 * @parm: Byte 2 of mode sense command
5465 * @dma_addr: DMA address of mode sense buffer
5466 * @xfer_len: Size of DMA buffer
5467 *
5468 * Return value:
5469 * none
5470 **/
5471static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5472 __be32 res_handle,
5473 u8 parm, u32 dma_addr, u8 xfer_len)
5474{
5475 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5476 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5477
5478 ioarcb->res_handle = res_handle;
5479 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5480 ioarcb->cmd_pkt.cdb[2] = parm;
5481 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5482 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5483
5484 ioadl->flags_and_data_len =
5485 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5486 ioadl->address = cpu_to_be32(dma_addr);
5487 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5488 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5489}
5490
dfed823e
BK
5491/**
5492 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5493 * @ipr_cmd: ipr command struct
5494 *
5495 * This function handles the failure of an IOA bringup command.
5496 *
5497 * Return value:
5498 * IPR_RC_JOB_RETURN
5499 **/
5500static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5501{
5502 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5503 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5504
5505 dev_err(&ioa_cfg->pdev->dev,
5506 "0x%02X failed with IOASC: 0x%08X\n",
5507 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5508
5509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5510 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5511 return IPR_RC_JOB_RETURN;
5512}
5513
5514/**
5515 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5516 * @ipr_cmd: ipr command struct
5517 *
5518 * This function handles the failure of a Mode Sense to the IOAFP.
5519 * Some adapters do not handle all mode pages.
5520 *
5521 * Return value:
5522 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5523 **/
5524static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5525{
5526 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5527
5528 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5529 ipr_cmd->job_step = ipr_setup_write_cache;
5530 return IPR_RC_JOB_CONTINUE;
5531 }
5532
5533 return ipr_reset_cmd_failed(ipr_cmd);
5534}
5535
1da177e4
LT
5536/**
5537 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5538 * @ipr_cmd: ipr command struct
5539 *
5540 * This function send a Page 28 mode sense to the IOA to
5541 * retrieve SCSI bus attributes.
5542 *
5543 * Return value:
5544 * IPR_RC_JOB_RETURN
5545 **/
5546static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5547{
5548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5549
5550 ENTER;
5551 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5552 0x28, ioa_cfg->vpd_cbs_dma +
5553 offsetof(struct ipr_misc_cbs, mode_pages),
5554 sizeof(struct ipr_mode_pages));
5555
5556 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 5557 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
5558
5559 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5560
5561 LEAVE;
5562 return IPR_RC_JOB_RETURN;
5563}
5564
5565/**
5566 * ipr_init_res_table - Initialize the resource table
5567 * @ipr_cmd: ipr command struct
5568 *
5569 * This function looks through the existing resource table, comparing
5570 * it with the config table. This function will take care of old/new
5571 * devices and schedule adding/removing them from the mid-layer
5572 * as appropriate.
5573 *
5574 * Return value:
5575 * IPR_RC_JOB_CONTINUE
5576 **/
5577static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5578{
5579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5580 struct ipr_resource_entry *res, *temp;
5581 struct ipr_config_table_entry *cfgte;
5582 int found, i;
5583 LIST_HEAD(old_res);
5584
5585 ENTER;
5586 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5587 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5588
5589 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5590 list_move_tail(&res->queue, &old_res);
5591
5592 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5593 cfgte = &ioa_cfg->cfg_table->dev[i];
5594 found = 0;
5595
5596 list_for_each_entry_safe(res, temp, &old_res, queue) {
5597 if (!memcmp(&res->cfgte.res_addr,
5598 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5599 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5600 found = 1;
5601 break;
5602 }
5603 }
5604
5605 if (!found) {
5606 if (list_empty(&ioa_cfg->free_res_q)) {
5607 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5608 break;
5609 }
5610
5611 found = 1;
5612 res = list_entry(ioa_cfg->free_res_q.next,
5613 struct ipr_resource_entry, queue);
5614 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5615 ipr_init_res_entry(res);
5616 res->add_to_ml = 1;
5617 }
5618
5619 if (found)
5620 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5621 }
5622
5623 list_for_each_entry_safe(res, temp, &old_res, queue) {
5624 if (res->sdev) {
5625 res->del_from_ml = 1;
1121b794 5626 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
1da177e4
LT
5627 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5628 } else {
5629 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5630 }
5631 }
5632
5633 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5634
5635 LEAVE;
5636 return IPR_RC_JOB_CONTINUE;
5637}
5638
5639/**
5640 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5641 * @ipr_cmd: ipr command struct
5642 *
5643 * This function sends a Query IOA Configuration command
5644 * to the adapter to retrieve the IOA configuration table.
5645 *
5646 * Return value:
5647 * IPR_RC_JOB_RETURN
5648 **/
5649static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5650{
5651 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5652 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5653 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5654 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5655
5656 ENTER;
5657 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5658 ucode_vpd->major_release, ucode_vpd->card_type,
5659 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5660 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5661 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5662
5663 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5664 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5665 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5666
5667 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5668 ioarcb->read_data_transfer_length =
5669 cpu_to_be32(sizeof(struct ipr_config_table));
5670
5671 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5672 ioadl->flags_and_data_len =
5673 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5674
5675 ipr_cmd->job_step = ipr_init_res_table;
5676
5677 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5678
5679 LEAVE;
5680 return IPR_RC_JOB_RETURN;
5681}
5682
5683/**
5684 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5685 * @ipr_cmd: ipr command struct
5686 *
5687 * This utility function sends an inquiry to the adapter.
5688 *
5689 * Return value:
5690 * none
5691 **/
5692static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5693 u32 dma_addr, u8 xfer_len)
5694{
5695 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5696 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5697
5698 ENTER;
5699 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5700 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5701
5702 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5703 ioarcb->cmd_pkt.cdb[1] = flags;
5704 ioarcb->cmd_pkt.cdb[2] = page;
5705 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5706
5707 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5708 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5709
5710 ioadl->address = cpu_to_be32(dma_addr);
5711 ioadl->flags_and_data_len =
5712 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5713
5714 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5715 LEAVE;
5716}
5717
62275040
BK
5718/**
5719 * ipr_inquiry_page_supported - Is the given inquiry page supported
5720 * @page0: inquiry page 0 buffer
5721 * @page: page code.
5722 *
5723 * This function determines if the specified inquiry page is supported.
5724 *
5725 * Return value:
5726 * 1 if page is supported / 0 if not
5727 **/
5728static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5729{
5730 int i;
5731
5732 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5733 if (page0->page[i] == page)
5734 return 1;
5735
5736 return 0;
5737}
5738
1da177e4
LT
5739/**
5740 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5741 * @ipr_cmd: ipr command struct
5742 *
5743 * This function sends a Page 3 inquiry to the adapter
5744 * to retrieve software VPD information.
5745 *
5746 * Return value:
5747 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5748 **/
5749static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040
BK
5750{
5751 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5752 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5753
5754 ENTER;
5755
5756 if (!ipr_inquiry_page_supported(page0, 1))
5757 ioa_cfg->cache_state = CACHE_NONE;
5758
5759 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5760
5761 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5762 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5763 sizeof(struct ipr_inquiry_page3));
5764
5765 LEAVE;
5766 return IPR_RC_JOB_RETURN;
5767}
5768
5769/**
5770 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5771 * @ipr_cmd: ipr command struct
5772 *
5773 * This function sends a Page 0 inquiry to the adapter
5774 * to retrieve supported inquiry pages.
5775 *
5776 * Return value:
5777 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5778 **/
5779static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
5780{
5781 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5782 char type[5];
5783
5784 ENTER;
5785
5786 /* Grab the type out of the VPD and store it away */
5787 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5788 type[4] = '\0';
5789 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5790
62275040 5791 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 5792
62275040
BK
5793 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5794 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5795 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
5796
5797 LEAVE;
5798 return IPR_RC_JOB_RETURN;
5799}
5800
5801/**
5802 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5803 * @ipr_cmd: ipr command struct
5804 *
5805 * This function sends a standard inquiry to the adapter.
5806 *
5807 * Return value:
5808 * IPR_RC_JOB_RETURN
5809 **/
5810static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5811{
5812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5813
5814 ENTER;
62275040 5815 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
5816
5817 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5818 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5819 sizeof(struct ipr_ioa_vpd));
5820
5821 LEAVE;
5822 return IPR_RC_JOB_RETURN;
5823}
5824
5825/**
5826 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5827 * @ipr_cmd: ipr command struct
5828 *
5829 * This function send an Identify Host Request Response Queue
5830 * command to establish the HRRQ with the adapter.
5831 *
5832 * Return value:
5833 * IPR_RC_JOB_RETURN
5834 **/
5835static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5836{
5837 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5838 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5839
5840 ENTER;
5841 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5842
5843 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5844 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5845
5846 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5847 ioarcb->cmd_pkt.cdb[2] =
5848 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5849 ioarcb->cmd_pkt.cdb[3] =
5850 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5851 ioarcb->cmd_pkt.cdb[4] =
5852 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5853 ioarcb->cmd_pkt.cdb[5] =
5854 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5855 ioarcb->cmd_pkt.cdb[7] =
5856 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5857 ioarcb->cmd_pkt.cdb[8] =
5858 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5859
5860 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5861
5862 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5863
5864 LEAVE;
5865 return IPR_RC_JOB_RETURN;
5866}
5867
5868/**
5869 * ipr_reset_timer_done - Adapter reset timer function
5870 * @ipr_cmd: ipr command struct
5871 *
5872 * Description: This function is used in adapter reset processing
5873 * for timing events. If the reset_cmd pointer in the IOA
5874 * config struct is not this adapter's we are doing nested
5875 * resets and fail_all_ops will take care of freeing the
5876 * command block.
5877 *
5878 * Return value:
5879 * none
5880 **/
5881static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5882{
5883 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5884 unsigned long lock_flags = 0;
5885
5886 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5887
5888 if (ioa_cfg->reset_cmd == ipr_cmd) {
5889 list_del(&ipr_cmd->queue);
5890 ipr_cmd->done(ipr_cmd);
5891 }
5892
5893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5894}
5895
5896/**
5897 * ipr_reset_start_timer - Start a timer for adapter reset job
5898 * @ipr_cmd: ipr command struct
5899 * @timeout: timeout value
5900 *
5901 * Description: This function is used in adapter reset processing
5902 * for timing events. If the reset_cmd pointer in the IOA
5903 * config struct is not this adapter's we are doing nested
5904 * resets and fail_all_ops will take care of freeing the
5905 * command block.
5906 *
5907 * Return value:
5908 * none
5909 **/
5910static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5911 unsigned long timeout)
5912{
5913 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5914 ipr_cmd->done = ipr_reset_ioa_job;
5915
5916 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5917 ipr_cmd->timer.expires = jiffies + timeout;
5918 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5919 add_timer(&ipr_cmd->timer);
5920}
5921
5922/**
5923 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5924 * @ioa_cfg: ioa cfg struct
5925 *
5926 * Return value:
5927 * nothing
5928 **/
5929static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5930{
5931 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5932
5933 /* Initialize Host RRQ pointers */
5934 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5935 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5936 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5937 ioa_cfg->toggle_bit = 1;
5938
5939 /* Zero out config table */
5940 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5941}
5942
5943/**
5944 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5945 * @ipr_cmd: ipr command struct
5946 *
5947 * This function reinitializes some control blocks and
5948 * enables destructive diagnostics on the adapter.
5949 *
5950 * Return value:
5951 * IPR_RC_JOB_RETURN
5952 **/
5953static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5954{
5955 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5956 volatile u32 int_reg;
5957
5958 ENTER;
5959 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5960 ipr_init_ioa_mem(ioa_cfg);
5961
5962 ioa_cfg->allow_interrupts = 1;
5963 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5964
5965 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5966 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5967 ioa_cfg->regs.clr_interrupt_mask_reg);
5968 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5969 return IPR_RC_JOB_CONTINUE;
5970 }
5971
5972 /* Enable destructive diagnostics on IOA */
3d1d0da6 5973 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
1da177e4
LT
5974
5975 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5976 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5977
5978 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5979
5980 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5981 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5982 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5983 ipr_cmd->done = ipr_reset_ioa_job;
5984 add_timer(&ipr_cmd->timer);
5985 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5986
5987 LEAVE;
5988 return IPR_RC_JOB_RETURN;
5989}
5990
5991/**
5992 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5993 * @ipr_cmd: ipr command struct
5994 *
5995 * This function is invoked when an adapter dump has run out
5996 * of processing time.
5997 *
5998 * Return value:
5999 * IPR_RC_JOB_CONTINUE
6000 **/
6001static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6002{
6003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6004
6005 if (ioa_cfg->sdt_state == GET_DUMP)
6006 ioa_cfg->sdt_state = ABORT_DUMP;
6007
6008 ipr_cmd->job_step = ipr_reset_alert;
6009
6010 return IPR_RC_JOB_CONTINUE;
6011}
6012
6013/**
6014 * ipr_unit_check_no_data - Log a unit check/no data error log
6015 * @ioa_cfg: ioa config struct
6016 *
6017 * Logs an error indicating the adapter unit checked, but for some
6018 * reason, we were unable to fetch the unit check buffer.
6019 *
6020 * Return value:
6021 * nothing
6022 **/
6023static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6024{
6025 ioa_cfg->errors_logged++;
6026 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6027}
6028
6029/**
6030 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6031 * @ioa_cfg: ioa config struct
6032 *
6033 * Fetches the unit check buffer from the adapter by clocking the data
6034 * through the mailbox register.
6035 *
6036 * Return value:
6037 * nothing
6038 **/
6039static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6040{
6041 unsigned long mailbox;
6042 struct ipr_hostrcb *hostrcb;
6043 struct ipr_uc_sdt sdt;
6044 int rc, length;
6045
6046 mailbox = readl(ioa_cfg->ioa_mailbox);
6047
6048 if (!ipr_sdt_is_fmt2(mailbox)) {
6049 ipr_unit_check_no_data(ioa_cfg);
6050 return;
6051 }
6052
6053 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6054 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6055 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6056
6057 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6058 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6059 ipr_unit_check_no_data(ioa_cfg);
6060 return;
6061 }
6062
6063 /* Find length of the first sdt entry (UC buffer) */
6064 length = (be32_to_cpu(sdt.entry[0].end_offset) -
6065 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6066
6067 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6068 struct ipr_hostrcb, queue);
6069 list_del(&hostrcb->queue);
6070 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6071
6072 rc = ipr_get_ldump_data_section(ioa_cfg,
6073 be32_to_cpu(sdt.entry[0].bar_str_offset),
6074 (__be32 *)&hostrcb->hcam,
6075 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6076
6077 if (!rc)
6078 ipr_handle_log_data(ioa_cfg, hostrcb);
6079 else
6080 ipr_unit_check_no_data(ioa_cfg);
6081
6082 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6083}
6084
6085/**
6086 * ipr_reset_restore_cfg_space - Restore PCI config space.
6087 * @ipr_cmd: ipr command struct
6088 *
6089 * Description: This function restores the saved PCI config space of
6090 * the adapter, fails all outstanding ops back to the callers, and
6091 * fetches the dump/unit check if applicable to this reset.
6092 *
6093 * Return value:
6094 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6095 **/
6096static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6097{
6098 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6099 int rc;
6100
6101 ENTER;
b30197d2 6102 pci_unblock_user_cfg_access(ioa_cfg->pdev);
1da177e4
LT
6103 rc = pci_restore_state(ioa_cfg->pdev);
6104
6105 if (rc != PCIBIOS_SUCCESSFUL) {
6106 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6107 return IPR_RC_JOB_CONTINUE;
6108 }
6109
6110 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6111 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6112 return IPR_RC_JOB_CONTINUE;
6113 }
6114
6115 ipr_fail_all_ops(ioa_cfg);
6116
6117 if (ioa_cfg->ioa_unit_checked) {
6118 ioa_cfg->ioa_unit_checked = 0;
6119 ipr_get_unit_check_buffer(ioa_cfg);
6120 ipr_cmd->job_step = ipr_reset_alert;
6121 ipr_reset_start_timer(ipr_cmd, 0);
6122 return IPR_RC_JOB_RETURN;
6123 }
6124
6125 if (ioa_cfg->in_ioa_bringdown) {
6126 ipr_cmd->job_step = ipr_ioa_bringdown_done;
6127 } else {
6128 ipr_cmd->job_step = ipr_reset_enable_ioa;
6129
6130 if (GET_DUMP == ioa_cfg->sdt_state) {
6131 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6132 ipr_cmd->job_step = ipr_reset_wait_for_dump;
6133 schedule_work(&ioa_cfg->work_q);
6134 return IPR_RC_JOB_RETURN;
6135 }
6136 }
6137
6138 ENTER;
6139 return IPR_RC_JOB_CONTINUE;
6140}
6141
6142/**
6143 * ipr_reset_start_bist - Run BIST on the adapter.
6144 * @ipr_cmd: ipr command struct
6145 *
6146 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6147 *
6148 * Return value:
6149 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6150 **/
6151static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6152{
6153 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6154 int rc;
6155
6156 ENTER;
b30197d2 6157 pci_block_user_cfg_access(ioa_cfg->pdev);
1da177e4
LT
6158 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6159
6160 if (rc != PCIBIOS_SUCCESSFUL) {
6161 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6162 rc = IPR_RC_JOB_CONTINUE;
6163 } else {
6164 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6165 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6166 rc = IPR_RC_JOB_RETURN;
6167 }
6168
6169 LEAVE;
6170 return rc;
6171}
6172
6173/**
6174 * ipr_reset_allowed - Query whether or not IOA can be reset
6175 * @ioa_cfg: ioa config struct
6176 *
6177 * Return value:
6178 * 0 if reset not allowed / non-zero if reset is allowed
6179 **/
6180static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6181{
6182 volatile u32 temp_reg;
6183
6184 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6185 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6186}
6187
6188/**
6189 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6190 * @ipr_cmd: ipr command struct
6191 *
6192 * Description: This function waits for adapter permission to run BIST,
6193 * then runs BIST. If the adapter does not give permission after a
6194 * reasonable time, we will reset the adapter anyway. The impact of
6195 * resetting the adapter without warning the adapter is the risk of
6196 * losing the persistent error log on the adapter. If the adapter is
6197 * reset while it is writing to the flash on the adapter, the flash
6198 * segment will have bad ECC and be zeroed.
6199 *
6200 * Return value:
6201 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6202 **/
6203static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6204{
6205 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6206 int rc = IPR_RC_JOB_RETURN;
6207
6208 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6209 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6210 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6211 } else {
6212 ipr_cmd->job_step = ipr_reset_start_bist;
6213 rc = IPR_RC_JOB_CONTINUE;
6214 }
6215
6216 return rc;
6217}
6218
6219/**
6220 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6221 * @ipr_cmd: ipr command struct
6222 *
6223 * Description: This function alerts the adapter that it will be reset.
6224 * If memory space is not currently enabled, proceed directly
6225 * to running BIST on the adapter. The timer must always be started
6226 * so we guarantee we do not run BIST from ipr_isr.
6227 *
6228 * Return value:
6229 * IPR_RC_JOB_RETURN
6230 **/
6231static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6232{
6233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6234 u16 cmd_reg;
6235 int rc;
6236
6237 ENTER;
6238 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6239
6240 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6241 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6242 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6243 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6244 } else {
6245 ipr_cmd->job_step = ipr_reset_start_bist;
6246 }
6247
6248 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6249 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6250
6251 LEAVE;
6252 return IPR_RC_JOB_RETURN;
6253}
6254
6255/**
6256 * ipr_reset_ucode_download_done - Microcode download completion
6257 * @ipr_cmd: ipr command struct
6258 *
6259 * Description: This function unmaps the microcode download buffer.
6260 *
6261 * Return value:
6262 * IPR_RC_JOB_CONTINUE
6263 **/
6264static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6265{
6266 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6267 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6268
6269 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6270 sglist->num_sg, DMA_TO_DEVICE);
6271
6272 ipr_cmd->job_step = ipr_reset_alert;
6273 return IPR_RC_JOB_CONTINUE;
6274}
6275
6276/**
6277 * ipr_reset_ucode_download - Download microcode to the adapter
6278 * @ipr_cmd: ipr command struct
6279 *
6280 * Description: This function checks to see if it there is microcode
6281 * to download to the adapter. If there is, a download is performed.
6282 *
6283 * Return value:
6284 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6285 **/
6286static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6287{
6288 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6289 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6290
6291 ENTER;
6292 ipr_cmd->job_step = ipr_reset_alert;
6293
6294 if (!sglist)
6295 return IPR_RC_JOB_CONTINUE;
6296
6297 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6298 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6299 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6300 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6301 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6302 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6303 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6304
12baa420 6305 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
6306 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6307
6308 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6309 IPR_WRITE_BUFFER_TIMEOUT);
6310
6311 LEAVE;
6312 return IPR_RC_JOB_RETURN;
6313}
6314
6315/**
6316 * ipr_reset_shutdown_ioa - Shutdown the adapter
6317 * @ipr_cmd: ipr command struct
6318 *
6319 * Description: This function issues an adapter shutdown of the
6320 * specified type to the specified adapter as part of the
6321 * adapter reset job.
6322 *
6323 * Return value:
6324 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6325 **/
6326static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6327{
6328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6329 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6330 unsigned long timeout;
6331 int rc = IPR_RC_JOB_CONTINUE;
6332
6333 ENTER;
6334 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6335 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6336 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6337 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6338 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6339
6340 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6341 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6342 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6343 timeout = IPR_INTERNAL_TIMEOUT;
6344 else
6345 timeout = IPR_SHUTDOWN_TIMEOUT;
6346
6347 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6348
6349 rc = IPR_RC_JOB_RETURN;
6350 ipr_cmd->job_step = ipr_reset_ucode_download;
6351 } else
6352 ipr_cmd->job_step = ipr_reset_alert;
6353
6354 LEAVE;
6355 return rc;
6356}
6357
6358/**
6359 * ipr_reset_ioa_job - Adapter reset job
6360 * @ipr_cmd: ipr command struct
6361 *
6362 * Description: This function is the job router for the adapter reset job.
6363 *
6364 * Return value:
6365 * none
6366 **/
6367static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6368{
6369 u32 rc, ioasc;
1da177e4
LT
6370 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6371
6372 do {
6373 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6374
6375 if (ioa_cfg->reset_cmd != ipr_cmd) {
6376 /*
6377 * We are doing nested adapter resets and this is
6378 * not the current reset job.
6379 */
6380 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6381 return;
6382 }
6383
6384 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e
BK
6385 rc = ipr_cmd->job_step_failed(ipr_cmd);
6386 if (rc == IPR_RC_JOB_RETURN)
6387 return;
1da177e4
LT
6388 }
6389
6390 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 6391 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4
LT
6392 rc = ipr_cmd->job_step(ipr_cmd);
6393 } while(rc == IPR_RC_JOB_CONTINUE);
6394}
6395
6396/**
6397 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6398 * @ioa_cfg: ioa config struct
6399 * @job_step: first job step of reset job
6400 * @shutdown_type: shutdown type
6401 *
6402 * Description: This function will initiate the reset of the given adapter
6403 * starting at the selected job step.
6404 * If the caller needs to wait on the completion of the reset,
6405 * the caller must sleep on the reset_wait_q.
6406 *
6407 * Return value:
6408 * none
6409 **/
6410static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6411 int (*job_step) (struct ipr_cmnd *),
6412 enum ipr_shutdown_type shutdown_type)
6413{
6414 struct ipr_cmnd *ipr_cmd;
6415
6416 ioa_cfg->in_reset_reload = 1;
6417 ioa_cfg->allow_cmds = 0;
6418 scsi_block_requests(ioa_cfg->host);
6419
6420 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6421 ioa_cfg->reset_cmd = ipr_cmd;
6422 ipr_cmd->job_step = job_step;
6423 ipr_cmd->u.shutdown_type = shutdown_type;
6424
6425 ipr_reset_ioa_job(ipr_cmd);
6426}
6427
6428/**
6429 * ipr_initiate_ioa_reset - Initiate an adapter reset
6430 * @ioa_cfg: ioa config struct
6431 * @shutdown_type: shutdown type
6432 *
6433 * Description: This function will initiate the reset of the given adapter.
6434 * If the caller needs to wait on the completion of the reset,
6435 * the caller must sleep on the reset_wait_q.
6436 *
6437 * Return value:
6438 * none
6439 **/
6440static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6441 enum ipr_shutdown_type shutdown_type)
6442{
6443 if (ioa_cfg->ioa_is_dead)
6444 return;
6445
6446 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6447 ioa_cfg->sdt_state = ABORT_DUMP;
6448
6449 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6450 dev_err(&ioa_cfg->pdev->dev,
6451 "IOA taken offline - error recovery failed\n");
6452
6453 ioa_cfg->reset_retries = 0;
6454 ioa_cfg->ioa_is_dead = 1;
6455
6456 if (ioa_cfg->in_ioa_bringdown) {
6457 ioa_cfg->reset_cmd = NULL;
6458 ioa_cfg->in_reset_reload = 0;
6459 ipr_fail_all_ops(ioa_cfg);
6460 wake_up_all(&ioa_cfg->reset_wait_q);
6461
6462 spin_unlock_irq(ioa_cfg->host->host_lock);
6463 scsi_unblock_requests(ioa_cfg->host);
6464 spin_lock_irq(ioa_cfg->host->host_lock);
6465 return;
6466 } else {
6467 ioa_cfg->in_ioa_bringdown = 1;
6468 shutdown_type = IPR_SHUTDOWN_NONE;
6469 }
6470 }
6471
6472 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6473 shutdown_type);
6474}
6475
f8a88b19
LV
6476/**
6477 * ipr_reset_freeze - Hold off all I/O activity
6478 * @ipr_cmd: ipr command struct
6479 *
6480 * Description: If the PCI slot is frozen, hold off all I/O
6481 * activity; then, as soon as the slot is available again,
6482 * initiate an adapter reset.
6483 */
6484static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6485{
6486 /* Disallow new interrupts, avoid loop */
6487 ipr_cmd->ioa_cfg->allow_interrupts = 0;
6488 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6489 ipr_cmd->done = ipr_reset_ioa_job;
6490 return IPR_RC_JOB_RETURN;
6491}
6492
6493/**
6494 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6495 * @pdev: PCI device struct
6496 *
6497 * Description: This routine is called to tell us that the PCI bus
6498 * is down. Can't do anything here, except put the device driver
6499 * into a holding pattern, waiting for the PCI bus to come back.
6500 */
6501static void ipr_pci_frozen(struct pci_dev *pdev)
6502{
6503 unsigned long flags = 0;
6504 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6505
6506 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6507 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6508 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6509}
6510
6511/**
6512 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6513 * @pdev: PCI device struct
6514 *
6515 * Description: This routine is called by the pci error recovery
6516 * code after the PCI slot has been reset, just before we
6517 * should resume normal operations.
6518 */
6519static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6520{
6521 unsigned long flags = 0;
6522 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6523
6524 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6525 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6526 IPR_SHUTDOWN_NONE);
6527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6528 return PCI_ERS_RESULT_RECOVERED;
6529}
6530
6531/**
6532 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6533 * @pdev: PCI device struct
6534 *
6535 * Description: This routine is called when the PCI bus has
6536 * permanently failed.
6537 */
6538static void ipr_pci_perm_failure(struct pci_dev *pdev)
6539{
6540 unsigned long flags = 0;
6541 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6542
6543 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6544 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6545 ioa_cfg->sdt_state = ABORT_DUMP;
6546 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6547 ioa_cfg->in_ioa_bringdown = 1;
6548 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6549 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6550}
6551
6552/**
6553 * ipr_pci_error_detected - Called when a PCI error is detected.
6554 * @pdev: PCI device struct
6555 * @state: PCI channel state
6556 *
6557 * Description: Called when a PCI error is detected.
6558 *
6559 * Return value:
6560 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6561 */
6562static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6563 pci_channel_state_t state)
6564{
6565 switch (state) {
6566 case pci_channel_io_frozen:
6567 ipr_pci_frozen(pdev);
6568 return PCI_ERS_RESULT_NEED_RESET;
6569 case pci_channel_io_perm_failure:
6570 ipr_pci_perm_failure(pdev);
6571 return PCI_ERS_RESULT_DISCONNECT;
6572 break;
6573 default:
6574 break;
6575 }
6576 return PCI_ERS_RESULT_NEED_RESET;
6577}
6578
1da177e4
LT
6579/**
6580 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6581 * @ioa_cfg: ioa cfg struct
6582 *
6583 * Description: This is the second phase of adapter intialization
6584 * This function takes care of initilizing the adapter to the point
6585 * where it can accept new commands.
6586
6587 * Return value:
6588 * 0 on sucess / -EIO on failure
6589 **/
6590static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
6591{
6592 int rc = 0;
6593 unsigned long host_lock_flags = 0;
6594
6595 ENTER;
6596 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6597 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce
BK
6598 if (ioa_cfg->needs_hard_reset) {
6599 ioa_cfg->needs_hard_reset = 0;
6600 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6601 } else
6602 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
6603 IPR_SHUTDOWN_NONE);
1da177e4
LT
6604
6605 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6606 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6607 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6608
6609 if (ioa_cfg->ioa_is_dead) {
6610 rc = -EIO;
6611 } else if (ipr_invalid_adapter(ioa_cfg)) {
6612 if (!ipr_testmode)
6613 rc = -EIO;
6614
6615 dev_err(&ioa_cfg->pdev->dev,
6616 "Adapter not supported in this hardware configuration.\n");
6617 }
6618
6619 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6620
6621 LEAVE;
6622 return rc;
6623}
6624
6625/**
6626 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6627 * @ioa_cfg: ioa config struct
6628 *
6629 * Return value:
6630 * none
6631 **/
6632static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6633{
6634 int i;
6635
6636 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6637 if (ioa_cfg->ipr_cmnd_list[i])
6638 pci_pool_free(ioa_cfg->ipr_cmd_pool,
6639 ioa_cfg->ipr_cmnd_list[i],
6640 ioa_cfg->ipr_cmnd_list_dma[i]);
6641
6642 ioa_cfg->ipr_cmnd_list[i] = NULL;
6643 }
6644
6645 if (ioa_cfg->ipr_cmd_pool)
6646 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6647
6648 ioa_cfg->ipr_cmd_pool = NULL;
6649}
6650
6651/**
6652 * ipr_free_mem - Frees memory allocated for an adapter
6653 * @ioa_cfg: ioa cfg struct
6654 *
6655 * Return value:
6656 * nothing
6657 **/
6658static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6659{
6660 int i;
6661
6662 kfree(ioa_cfg->res_entries);
6663 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6664 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6665 ipr_free_cmd_blks(ioa_cfg);
6666 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6667 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6668 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6669 ioa_cfg->cfg_table,
6670 ioa_cfg->cfg_table_dma);
6671
6672 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6673 pci_free_consistent(ioa_cfg->pdev,
6674 sizeof(struct ipr_hostrcb),
6675 ioa_cfg->hostrcb[i],
6676 ioa_cfg->hostrcb_dma[i]);
6677 }
6678
6679 ipr_free_dump(ioa_cfg);
1da177e4
LT
6680 kfree(ioa_cfg->trace);
6681}
6682
6683/**
6684 * ipr_free_all_resources - Free all allocated resources for an adapter.
6685 * @ipr_cmd: ipr command struct
6686 *
6687 * This function frees all allocated resources for the
6688 * specified adapter.
6689 *
6690 * Return value:
6691 * none
6692 **/
6693static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6694{
6695 struct pci_dev *pdev = ioa_cfg->pdev;
6696
6697 ENTER;
6698 free_irq(pdev->irq, ioa_cfg);
6699 iounmap(ioa_cfg->hdw_dma_regs);
6700 pci_release_regions(pdev);
6701 ipr_free_mem(ioa_cfg);
6702 scsi_host_put(ioa_cfg->host);
6703 pci_disable_device(pdev);
6704 LEAVE;
6705}
6706
6707/**
6708 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6709 * @ioa_cfg: ioa config struct
6710 *
6711 * Return value:
6712 * 0 on success / -ENOMEM on allocation failure
6713 **/
6714static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6715{
6716 struct ipr_cmnd *ipr_cmd;
6717 struct ipr_ioarcb *ioarcb;
6718 dma_addr_t dma_addr;
6719 int i;
6720
6721 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6722 sizeof(struct ipr_cmnd), 8, 0);
6723
6724 if (!ioa_cfg->ipr_cmd_pool)
6725 return -ENOMEM;
6726
6727 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6728 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6729
6730 if (!ipr_cmd) {
6731 ipr_free_cmd_blks(ioa_cfg);
6732 return -ENOMEM;
6733 }
6734
6735 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6736 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6737 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6738
6739 ioarcb = &ipr_cmd->ioarcb;
6740 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6741 ioarcb->host_response_handle = cpu_to_be32(i << 2);
6742 ioarcb->write_ioadl_addr =
6743 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6744 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6745 ioarcb->ioasa_host_pci_addr =
6746 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6747 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6748 ipr_cmd->cmd_index = i;
6749 ipr_cmd->ioa_cfg = ioa_cfg;
6750 ipr_cmd->sense_buffer_dma = dma_addr +
6751 offsetof(struct ipr_cmnd, sense_buffer);
6752
6753 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6754 }
6755
6756 return 0;
6757}
6758
6759/**
6760 * ipr_alloc_mem - Allocate memory for an adapter
6761 * @ioa_cfg: ioa config struct
6762 *
6763 * Return value:
6764 * 0 on success / non-zero for error
6765 **/
6766static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6767{
6768 struct pci_dev *pdev = ioa_cfg->pdev;
6769 int i, rc = -ENOMEM;
6770
6771 ENTER;
0bc42e35 6772 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
1da177e4
LT
6773 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6774
6775 if (!ioa_cfg->res_entries)
6776 goto out;
6777
1da177e4
LT
6778 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6779 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6780
6781 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6782 sizeof(struct ipr_misc_cbs),
6783 &ioa_cfg->vpd_cbs_dma);
6784
6785 if (!ioa_cfg->vpd_cbs)
6786 goto out_free_res_entries;
6787
6788 if (ipr_alloc_cmd_blks(ioa_cfg))
6789 goto out_free_vpd_cbs;
6790
6791 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6792 sizeof(u32) * IPR_NUM_CMD_BLKS,
6793 &ioa_cfg->host_rrq_dma);
6794
6795 if (!ioa_cfg->host_rrq)
6796 goto out_ipr_free_cmd_blocks;
6797
6798 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6799 sizeof(struct ipr_config_table),
6800 &ioa_cfg->cfg_table_dma);
6801
6802 if (!ioa_cfg->cfg_table)
6803 goto out_free_host_rrq;
6804
6805 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6806 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6807 sizeof(struct ipr_hostrcb),
6808 &ioa_cfg->hostrcb_dma[i]);
6809
6810 if (!ioa_cfg->hostrcb[i])
6811 goto out_free_hostrcb_dma;
6812
6813 ioa_cfg->hostrcb[i]->hostrcb_dma =
6814 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6815 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6816 }
6817
0bc42e35 6818 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
6819 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6820
6821 if (!ioa_cfg->trace)
6822 goto out_free_hostrcb_dma;
6823
1da177e4
LT
6824 rc = 0;
6825out:
6826 LEAVE;
6827 return rc;
6828
6829out_free_hostrcb_dma:
6830 while (i-- > 0) {
6831 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6832 ioa_cfg->hostrcb[i],
6833 ioa_cfg->hostrcb_dma[i]);
6834 }
6835 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6836 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6837out_free_host_rrq:
6838 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6839 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6840out_ipr_free_cmd_blocks:
6841 ipr_free_cmd_blks(ioa_cfg);
6842out_free_vpd_cbs:
6843 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6844 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6845out_free_res_entries:
6846 kfree(ioa_cfg->res_entries);
6847 goto out;
6848}
6849
6850/**
6851 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6852 * @ioa_cfg: ioa config struct
6853 *
6854 * Return value:
6855 * none
6856 **/
6857static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6858{
6859 int i;
6860
6861 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6862 ioa_cfg->bus_attr[i].bus = i;
6863 ioa_cfg->bus_attr[i].qas_enabled = 0;
6864 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6865 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6866 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6867 else
6868 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6869 }
6870}
6871
6872/**
6873 * ipr_init_ioa_cfg - Initialize IOA config struct
6874 * @ioa_cfg: ioa config struct
6875 * @host: scsi host struct
6876 * @pdev: PCI dev struct
6877 *
6878 * Return value:
6879 * none
6880 **/
6881static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6882 struct Scsi_Host *host, struct pci_dev *pdev)
6883{
6884 const struct ipr_interrupt_offsets *p;
6885 struct ipr_interrupts *t;
6886 void __iomem *base;
6887
6888 ioa_cfg->host = host;
6889 ioa_cfg->pdev = pdev;
6890 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 6891 ioa_cfg->doorbell = IPR_DOORBELL;
32d29776
BK
6892 if (!ipr_auto_create)
6893 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6894 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6895 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6896 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6897 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6898 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6899 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6900 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6901 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6902
6903 INIT_LIST_HEAD(&ioa_cfg->free_q);
6904 INIT_LIST_HEAD(&ioa_cfg->pending_q);
6905 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6906 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6907 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6908 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6909 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6910 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6911 ioa_cfg->sdt_state = INACTIVE;
62275040
BK
6912 if (ipr_enable_cache)
6913 ioa_cfg->cache_state = CACHE_ENABLED;
6914 else
6915 ioa_cfg->cache_state = CACHE_DISABLED;
1da177e4
LT
6916
6917 ipr_initialize_bus_attr(ioa_cfg);
6918
6919 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6920 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6921 host->max_channel = IPR_MAX_BUS_TO_SCAN;
6922 host->unique_id = host->host_no;
6923 host->max_cmd_len = IPR_MAX_CDB_LEN;
6924 pci_set_drvdata(pdev, ioa_cfg);
6925
6926 p = &ioa_cfg->chip_cfg->regs;
6927 t = &ioa_cfg->regs;
6928 base = ioa_cfg->hdw_dma_regs;
6929
6930 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6931 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6932 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6933 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6934 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6935 t->ioarrin_reg = base + p->ioarrin_reg;
6936 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6937 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6938 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6939}
6940
6941/**
6942 * ipr_get_chip_cfg - Find adapter chip configuration
6943 * @dev_id: PCI device id struct
6944 *
6945 * Return value:
6946 * ptr to chip config on success / NULL on failure
6947 **/
6948static const struct ipr_chip_cfg_t * __devinit
6949ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6950{
6951 int i;
6952
6953 if (dev_id->driver_data)
6954 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6955
6956 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6957 if (ipr_chip[i].vendor == dev_id->vendor &&
6958 ipr_chip[i].device == dev_id->device)
6959 return ipr_chip[i].cfg;
6960 return NULL;
6961}
6962
6963/**
6964 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6965 * @pdev: PCI device struct
6966 * @dev_id: PCI device id struct
6967 *
6968 * Return value:
6969 * 0 on success / non-zero on failure
6970 **/
6971static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6972 const struct pci_device_id *dev_id)
6973{
6974 struct ipr_ioa_cfg *ioa_cfg;
6975 struct Scsi_Host *host;
6976 unsigned long ipr_regs_pci;
6977 void __iomem *ipr_regs;
a2a65a3e 6978 int rc = PCIBIOS_SUCCESSFUL;
ce155cce 6979 volatile u32 mask, uproc;
1da177e4
LT
6980
6981 ENTER;
6982
6983 if ((rc = pci_enable_device(pdev))) {
6984 dev_err(&pdev->dev, "Cannot enable adapter\n");
6985 goto out;
6986 }
6987
6988 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6989
6990 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6991
6992 if (!host) {
6993 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6994 rc = -ENOMEM;
6995 goto out_disable;
6996 }
6997
6998 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6999 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
35a39691
BK
7000 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7001 sata_port_info.flags, &ipr_sata_ops);
1da177e4
LT
7002
7003 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7004
7005 if (!ioa_cfg->chip_cfg) {
7006 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7007 dev_id->vendor, dev_id->device);
7008 goto out_scsi_host_put;
7009 }
7010
7011 ipr_regs_pci = pci_resource_start(pdev, 0);
7012
7013 rc = pci_request_regions(pdev, IPR_NAME);
7014 if (rc < 0) {
7015 dev_err(&pdev->dev,
7016 "Couldn't register memory range of registers\n");
7017 goto out_scsi_host_put;
7018 }
7019
7020 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7021
7022 if (!ipr_regs) {
7023 dev_err(&pdev->dev,
7024 "Couldn't map memory range of registers\n");
7025 rc = -ENOMEM;
7026 goto out_release_regions;
7027 }
7028
7029 ioa_cfg->hdw_dma_regs = ipr_regs;
7030 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7031 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7032
7033 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7034
7035 pci_set_master(pdev);
7036
7037 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7038 if (rc < 0) {
7039 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7040 goto cleanup_nomem;
7041 }
7042
7043 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7044 ioa_cfg->chip_cfg->cache_line_size);
7045
7046 if (rc != PCIBIOS_SUCCESSFUL) {
7047 dev_err(&pdev->dev, "Write of cache line size failed\n");
7048 rc = -EIO;
7049 goto cleanup_nomem;
7050 }
7051
7052 /* Save away PCI config space for use following IOA reset */
7053 rc = pci_save_state(pdev);
7054
7055 if (rc != PCIBIOS_SUCCESSFUL) {
7056 dev_err(&pdev->dev, "Failed to save PCI config space\n");
7057 rc = -EIO;
7058 goto cleanup_nomem;
7059 }
7060
7061 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7062 goto cleanup_nomem;
7063
7064 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7065 goto cleanup_nomem;
7066
7067 rc = ipr_alloc_mem(ioa_cfg);
7068 if (rc < 0) {
7069 dev_err(&pdev->dev,
7070 "Couldn't allocate enough memory for device driver!\n");
7071 goto cleanup_nomem;
7072 }
7073
ce155cce
BK
7074 /*
7075 * If HRRQ updated interrupt is not masked, or reset alert is set,
7076 * the card is in an unknown state and needs a hard reset
7077 */
7078 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7079 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7080 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7081 ioa_cfg->needs_hard_reset = 1;
7082
1da177e4 7083 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
1d6f359a 7084 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
1da177e4
LT
7085
7086 if (rc) {
7087 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7088 pdev->irq, rc);
7089 goto cleanup_nolog;
7090 }
7091
7092 spin_lock(&ipr_driver_lock);
7093 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7094 spin_unlock(&ipr_driver_lock);
7095
7096 LEAVE;
7097out:
7098 return rc;
7099
7100cleanup_nolog:
7101 ipr_free_mem(ioa_cfg);
7102cleanup_nomem:
7103 iounmap(ipr_regs);
7104out_release_regions:
7105 pci_release_regions(pdev);
7106out_scsi_host_put:
7107 scsi_host_put(host);
7108out_disable:
7109 pci_disable_device(pdev);
7110 goto out;
7111}
7112
7113/**
7114 * ipr_scan_vsets - Scans for VSET devices
7115 * @ioa_cfg: ioa config struct
7116 *
7117 * Description: Since the VSET resources do not follow SAM in that we can have
7118 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7119 *
7120 * Return value:
7121 * none
7122 **/
7123static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7124{
7125 int target, lun;
7126
7127 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7128 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
7129 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7130}
7131
7132/**
7133 * ipr_initiate_ioa_bringdown - Bring down an adapter
7134 * @ioa_cfg: ioa config struct
7135 * @shutdown_type: shutdown type
7136 *
7137 * Description: This function will initiate bringing down the adapter.
7138 * This consists of issuing an IOA shutdown to the adapter
7139 * to flush the cache, and running BIST.
7140 * If the caller needs to wait on the completion of the reset,
7141 * the caller must sleep on the reset_wait_q.
7142 *
7143 * Return value:
7144 * none
7145 **/
7146static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7147 enum ipr_shutdown_type shutdown_type)
7148{
7149 ENTER;
7150 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7151 ioa_cfg->sdt_state = ABORT_DUMP;
7152 ioa_cfg->reset_retries = 0;
7153 ioa_cfg->in_ioa_bringdown = 1;
7154 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7155 LEAVE;
7156}
7157
7158/**
7159 * __ipr_remove - Remove a single adapter
7160 * @pdev: pci device struct
7161 *
7162 * Adapter hot plug remove entry point.
7163 *
7164 * Return value:
7165 * none
7166 **/
7167static void __ipr_remove(struct pci_dev *pdev)
7168{
7169 unsigned long host_lock_flags = 0;
7170 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7171 ENTER;
7172
7173 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7174 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7175
7176 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7177 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 7178 flush_scheduled_work();
1da177e4
LT
7179 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7180
7181 spin_lock(&ipr_driver_lock);
7182 list_del(&ioa_cfg->queue);
7183 spin_unlock(&ipr_driver_lock);
7184
7185 if (ioa_cfg->sdt_state == ABORT_DUMP)
7186 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7187 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7188
7189 ipr_free_all_resources(ioa_cfg);
7190
7191 LEAVE;
7192}
7193
7194/**
7195 * ipr_remove - IOA hot plug remove entry point
7196 * @pdev: pci device struct
7197 *
7198 * Adapter hot plug remove entry point.
7199 *
7200 * Return value:
7201 * none
7202 **/
7203static void ipr_remove(struct pci_dev *pdev)
7204{
7205 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7206
7207 ENTER;
7208
1da177e4
LT
7209 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7210 &ipr_trace_attr);
7211 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7212 &ipr_dump_attr);
7213 scsi_remove_host(ioa_cfg->host);
7214
7215 __ipr_remove(pdev);
7216
7217 LEAVE;
7218}
7219
7220/**
7221 * ipr_probe - Adapter hot plug add entry point
7222 *
7223 * Return value:
7224 * 0 on success / non-zero on failure
7225 **/
7226static int __devinit ipr_probe(struct pci_dev *pdev,
7227 const struct pci_device_id *dev_id)
7228{
7229 struct ipr_ioa_cfg *ioa_cfg;
7230 int rc;
7231
7232 rc = ipr_probe_ioa(pdev, dev_id);
7233
7234 if (rc)
7235 return rc;
7236
7237 ioa_cfg = pci_get_drvdata(pdev);
7238 rc = ipr_probe_ioa_part2(ioa_cfg);
7239
7240 if (rc) {
7241 __ipr_remove(pdev);
7242 return rc;
7243 }
7244
7245 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7246
7247 if (rc) {
7248 __ipr_remove(pdev);
7249 return rc;
7250 }
7251
7252 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7253 &ipr_trace_attr);
7254
7255 if (rc) {
7256 scsi_remove_host(ioa_cfg->host);
7257 __ipr_remove(pdev);
7258 return rc;
7259 }
7260
7261 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7262 &ipr_dump_attr);
7263
7264 if (rc) {
7265 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7266 &ipr_trace_attr);
7267 scsi_remove_host(ioa_cfg->host);
7268 __ipr_remove(pdev);
7269 return rc;
7270 }
7271
7272 scsi_scan_host(ioa_cfg->host);
7273 ipr_scan_vsets(ioa_cfg);
7274 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7275 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 7276 ioa_cfg->host->max_channel = IPR_VSET_BUS;
1da177e4
LT
7277 schedule_work(&ioa_cfg->work_q);
7278 return 0;
7279}
7280
7281/**
7282 * ipr_shutdown - Shutdown handler.
d18c3db5 7283 * @pdev: pci device struct
1da177e4
LT
7284 *
7285 * This function is invoked upon system shutdown/reboot. It will issue
7286 * an adapter shutdown to the adapter to flush the write cache.
7287 *
7288 * Return value:
7289 * none
7290 **/
d18c3db5 7291static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 7292{
d18c3db5 7293 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4
LT
7294 unsigned long lock_flags = 0;
7295
7296 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7297 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7298 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7299 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7300}
7301
7302static struct pci_device_id ipr_pci_table[] __devinitdata = {
7303 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7304 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
7305 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7306 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7307 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
7308 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7309 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7310 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
7311 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7312 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7313 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
7314 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7315 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7316 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
7317 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7318 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7319 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
7320 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7321 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7322 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
7323 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
86f51436
BK
7324 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7325 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
7326 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7327 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7328 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
7329 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7330 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7331 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7332 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
60e7486b
BK
7333 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7334 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7335 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
86f51436
BK
7336 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7337 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
7338 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7339 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7340 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7341 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
60e7486b
BK
7342 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7343 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7344 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7345 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7346 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
7347 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7348 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7349 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
7350 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
1da177e4
LT
7351 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7352 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
7353 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7354 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7355 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
7356 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
86f51436
BK
7357 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7358 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
7359 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
60e7486b
BK
7360 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7361 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
7362 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
1da177e4
LT
7363 { }
7364};
7365MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7366
f8a88b19
LV
7367static struct pci_error_handlers ipr_err_handler = {
7368 .error_detected = ipr_pci_error_detected,
7369 .slot_reset = ipr_pci_slot_reset,
7370};
7371
1da177e4
LT
7372static struct pci_driver ipr_driver = {
7373 .name = IPR_NAME,
7374 .id_table = ipr_pci_table,
7375 .probe = ipr_probe,
7376 .remove = ipr_remove,
d18c3db5 7377 .shutdown = ipr_shutdown,
f8a88b19 7378 .err_handler = &ipr_err_handler,
1da177e4
LT
7379};
7380
7381/**
7382 * ipr_init - Module entry point
7383 *
7384 * Return value:
7385 * 0 on success / negative value on failure
7386 **/
7387static int __init ipr_init(void)
7388{
7389 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7390 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7391
dcbccbde 7392 return pci_register_driver(&ipr_driver);
1da177e4
LT
7393}
7394
7395/**
7396 * ipr_exit - Module unload
7397 *
7398 * Module unload entry point.
7399 *
7400 * Return value:
7401 * none
7402 **/
7403static void __exit ipr_exit(void)
7404{
7405 pci_unregister_driver(&ipr_driver);
7406}
7407
7408module_init(ipr_init);
7409module_exit(ipr_exit);