/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 * Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
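
/*
 * Example module load (the parameter values below are hypothetical,
 * shown only to illustrate the syntax of the parameters declared above):
 *
 *	modprobe ipr max_speed=2 log_level=4 enable_cache=1
 */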

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd: ipr command struct
 * @type: trace type
 * @add_data: additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
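
/*
 * Note: the driver trace above is a circular buffer. trace_index is
 * assumed to wrap within the bounds of ioa_cfg->trace (see the field
 * definition in ipr.h), so the oldest entries are silently overwritten
 * once the buffer fills.
 */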

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg: ioa config struct
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg: ioa config struct
 * @clr_ints: interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
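	/* Read the sense register back so the posted MMIO writes above are
	   flushed to the adapter; the value read is intentionally unused */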
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd: ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg: ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd: ipr command struct
 * @done: done function
 * @timeout_func: timeout function
 * @timeout: timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

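	/* Order the IOARCB memory writes above before handing its bus
	   address to the adapter below */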
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd: ipr command struct
 * @timeout_func: function to invoke if command times out
 * @timeout: timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg: ioa config struct
 * @type: HCAM type
 * @hostrcb: hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res: resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd: vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd: vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data: IOA error data
 * @len: data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc: IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd: ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd: ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If a host reset arrived while we were already resetting the
	   adapter for some other reason, and that reset failed, the
	   adapter is now dead and the host reset must be failed too. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res: resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg: ioa config struct
 * @bus: SCSI bus
 * @bus_width: bus width
 *
 * Return value:
 * 	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

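		/* Convert the enclosure's limit (assumed to be in MB/sec)
		   to the 100KHz units documented above:
		   rate = (MB/sec * 10) / (bytes per transfer) */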
		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg: ioa config struct
 * @max_delay: max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

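		/* Exponential backoff: double the poll interval on each
		   pass until max_delay is reached */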
		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg: ioa config struct
 * @start_addr: adapter address to dump
 * @dest: destination kernel buffer
 * @length_in_words: length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

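	/* Note: timing out while waiting for the reset alert to clear is
	   treated as non-fatal here, since the dump data was already read */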
	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg: ioa config struct
 * @pci_address: adapter address
 * @length: length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
1780 schedule();
1781 }
1782
1783 return bytes_copied;
1784}
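/*
 * Worked example of the page chunking above (illustrative, assuming
 * PAGE_SIZE == 4096): copying length == 10000 bytes starting from
 * page_offset == 0 proceeds in three chunks:
 *
 *   cur_len = min(10000, 4096) = 4096   (new page allocated)
 *   cur_len = min(5904,  4096) = 4096   (new page allocated)
 *   cur_len = min(1808,  4096) = 1808   (new page allocated)
 *
 * leaving page_offset == 1808, so a subsequent call keeps filling the
 * third page before allocating another.
 */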
1785
1786/**
1787 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1788 * @hdr: dump entry header struct
1789 *
1790 * Return value:
1791 * nothing
1792 **/
1793static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1794{
1795 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1796 hdr->num_elems = 1;
1797 hdr->offset = sizeof(*hdr);
1798 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1799}
1800
1801/**
1802 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1803 * @ioa_cfg: ioa config struct
1804 * @driver_dump: driver dump struct
1805 *
1806 * Return value:
1807 * nothing
1808 **/
1809static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1810 struct ipr_driver_dump *driver_dump)
1811{
1812 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1813
1814 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1815 driver_dump->ioa_type_entry.hdr.len =
1816 sizeof(struct ipr_dump_ioa_type_entry) -
1817 sizeof(struct ipr_dump_entry_header);
1818 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1819 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1820 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1821 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1822 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1823 ucode_vpd->minor_release[1];
1824 driver_dump->hdr.num_entries++;
1825}
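/*
 * The fw_version word above packs the four VPD bytes most significant
 * first. Illustrative example (hypothetical values): major_release
 * 0x02, card_type 0x57, minor_release {0x03, 0x04} yields
 *
 *   (0x02 << 24) | (0x57 << 16) | (0x03 << 8) | 0x04 == 0x02570304
 */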
1826
1827/**
1828 * ipr_dump_version_data - Fill in the driver version in the dump.
1829 * @ioa_cfg: ioa config struct
1830 * @driver_dump: driver dump struct
1831 *
1832 * Return value:
1833 * nothing
1834 **/
1835static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1836 struct ipr_driver_dump *driver_dump)
1837{
1838 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1839 driver_dump->version_entry.hdr.len =
1840 sizeof(struct ipr_dump_version_entry) -
1841 sizeof(struct ipr_dump_entry_header);
1842 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1843 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1844 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1845 driver_dump->hdr.num_entries++;
1846}
1847
1848/**
1849 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1850 * @ioa_cfg: ioa config struct
1851 * @driver_dump: driver dump struct
1852 *
1853 * Return value:
1854 * nothing
1855 **/
1856static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1857 struct ipr_driver_dump *driver_dump)
1858{
1859 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1860 driver_dump->trace_entry.hdr.len =
1861 sizeof(struct ipr_dump_trace_entry) -
1862 sizeof(struct ipr_dump_entry_header);
1863 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1864 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1865 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1866 driver_dump->hdr.num_entries++;
1867}
1868
1869/**
1870 * ipr_dump_location_data - Fill in the IOA location in the dump.
1871 * @ioa_cfg: ioa config struct
1872 * @driver_dump: driver dump struct
1873 *
1874 * Return value:
1875 * nothing
1876 **/
1877static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1878 struct ipr_driver_dump *driver_dump)
1879{
1880 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1881 driver_dump->location_entry.hdr.len =
1882 sizeof(struct ipr_dump_location_entry) -
1883 sizeof(struct ipr_dump_entry_header);
1884 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1885 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1886 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1887 driver_dump->hdr.num_entries++;
1888}
1889
1890/**
1891 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1892 * @ioa_cfg: ioa config struct
1893 * @dump: dump struct
1894 *
1895 * Return value:
1896 * nothing
1897 **/
1898static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1899{
1900 unsigned long start_addr, sdt_word;
1901 unsigned long lock_flags = 0;
1902 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1903 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1904 u32 num_entries, start_off, end_off;
1905 u32 bytes_to_copy, bytes_copied, rc;
1906 struct ipr_sdt *sdt;
1907 int i;
1908
1909 ENTER;
1910
1911 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1912
1913 if (ioa_cfg->sdt_state != GET_DUMP) {
1914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1915 return;
1916 }
1917
1918 start_addr = readl(ioa_cfg->ioa_mailbox);
1919
1920 if (!ipr_sdt_is_fmt2(start_addr)) {
1921 dev_err(&ioa_cfg->pdev->dev,
1922 "Invalid dump table format: %lx\n", start_addr);
1923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1924 return;
1925 }
1926
1927 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1928
1929 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1930
1931 /* Initialize the overall dump header */
1932 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1933 driver_dump->hdr.num_entries = 1;
1934 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1935 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1936 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1937 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1938
1939 ipr_dump_version_data(ioa_cfg, driver_dump);
1940 ipr_dump_location_data(ioa_cfg, driver_dump);
1941 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1942 ipr_dump_trace_data(ioa_cfg, driver_dump);
1943
1944 /* Update dump_header */
1945 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1946
1947 /* IOA Dump entry */
1948 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1949 ioa_dump->format = IPR_SDT_FMT2;
1950 ioa_dump->hdr.len = 0;
1951 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1952 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1953
1954	/* The first entries in the SDT are actually a list of dump addresses
1955	   and lengths used to gather the real dump data. sdt points to the
1956	   IOA-generated dump table. Dump data will be extracted based on the
1957	   entries in this table. */
1958 sdt = &ioa_dump->sdt;
1959
1960 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1961 sizeof(struct ipr_sdt) / sizeof(__be32));
1962
1963	/* Bail out if the Smart Dump Table could not be read or is not ready to use */
1964 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1965 dev_err(&ioa_cfg->pdev->dev,
1966 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1967 rc, be32_to_cpu(sdt->hdr.state));
1968 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1969 ioa_cfg->sdt_state = DUMP_OBTAINED;
1970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1971 return;
1972 }
1973
1974 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1975
1976 if (num_entries > IPR_NUM_SDT_ENTRIES)
1977 num_entries = IPR_NUM_SDT_ENTRIES;
1978
1979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1980
1981 for (i = 0; i < num_entries; i++) {
1982 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1983 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1984 break;
1985 }
1986
1987 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1988 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1989 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1990 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1991
1992 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1993 bytes_to_copy = end_off - start_off;
1994 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1995 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1996 continue;
1997 }
1998
1999 /* Copy data from adapter to driver buffers */
2000 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2001 bytes_to_copy);
2002
2003 ioa_dump->hdr.len += bytes_copied;
2004
2005 if (bytes_copied != bytes_to_copy) {
2006 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2007 break;
2008 }
2009 }
2010 }
2011 }
2012
2013 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2014
2015 /* Update dump_header */
2016 driver_dump->hdr.len += ioa_dump->hdr.len;
2017 wmb();
2018 ioa_cfg->sdt_state = DUMP_OBTAINED;
2019 LEAVE;
2020}
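/*
 * Layout of the finished dump image, as assembled above and later
 * streamed out through ipr_read_dump:
 *
 *   struct ipr_dump_header      overall header (len, num_entries, ...)
 *   version entry               driver version string
 *   location entry              PCI bus id of the adapter
 *   ioa type entry              adapter type and firmware version
 *   trace entry                 driver trace buffer
 *   IOA dump entry header       hdr.len == bytes gathered below
 *   ioa_data pages              raw data copied per valid SDT entry
 */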
2021
2022#else
2023#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2024#endif
2025
2026/**
2027 * ipr_release_dump - Free adapter dump memory
2028 * @kref: kref struct
2029 *
2030 * Return value:
2031 * nothing
2032 **/
2033static void ipr_release_dump(struct kref *kref)
2034{
2035 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2036 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2037 unsigned long lock_flags = 0;
2038 int i;
2039
2040 ENTER;
2041 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2042 ioa_cfg->dump = NULL;
2043 ioa_cfg->sdt_state = INACTIVE;
2044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2045
2046 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2047 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2048
2049 kfree(dump);
2050 LEAVE;
2051}
2052
2053/**
2054 * ipr_worker_thread - Worker thread
2055 * @data: ioa config struct
2056 *
2057 * Called at task level from a work thread. This function takes care
2058 * of adding and removing devices from the mid-layer as configuration
2059 * changes are detected by the adapter.
2060 *
2061 * Return value:
2062 * nothing
2063 **/
2064static void ipr_worker_thread(void *data)
2065{
2066 unsigned long lock_flags;
2067 struct ipr_resource_entry *res;
2068 struct scsi_device *sdev;
2069 struct ipr_dump *dump;
2070 struct ipr_ioa_cfg *ioa_cfg = data;
2071 u8 bus, target, lun;
2072 int did_work;
2073
2074 ENTER;
2075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2076
2077 if (ioa_cfg->sdt_state == GET_DUMP) {
2078 dump = ioa_cfg->dump;
2079 if (!dump) {
2080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2081 return;
2082 }
2083 kref_get(&dump->kref);
2084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2085 ipr_get_ioa_dump(ioa_cfg, dump);
2086 kref_put(&dump->kref, ipr_release_dump);
2087
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2090 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2092 return;
2093 }
2094
2095restart:
2096 do {
2097 did_work = 0;
2098 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2100 return;
2101 }
2102
2103 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2104 if (res->del_from_ml && res->sdev) {
2105 did_work = 1;
2106 sdev = res->sdev;
2107 if (!scsi_device_get(sdev)) {
2108 res->sdev = NULL;
2109 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2111 scsi_remove_device(sdev);
2112 scsi_device_put(sdev);
2113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2114 }
2115 break;
2116 }
2117 }
2118 } while(did_work);
2119
2120 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2121 if (res->add_to_ml) {
2122 bus = res->cfgte.res_addr.bus;
2123 target = res->cfgte.res_addr.target;
2124 lun = res->cfgte.res_addr.lun;
2125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2126 scsi_add_device(ioa_cfg->host, bus, target, lun);
2127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2128 goto restart;
2129 }
2130 }
2131
2132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2133 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
2134 LEAVE;
2135}
2136
2137#ifdef CONFIG_SCSI_IPR_TRACE
2138/**
2139 * ipr_read_trace - Dump the adapter trace
2140 * @kobj: kobject struct
2141 * @buf: buffer
2142 * @off: offset
2143 * @count: buffer size
2144 *
2145 * Return value:
2146 * number of bytes printed to buffer
2147 **/
2148static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2149 loff_t off, size_t count)
2150{
2151 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2152 struct Scsi_Host *shost = class_to_shost(cdev);
2153 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2154 unsigned long lock_flags = 0;
2155 int size = IPR_TRACE_SIZE;
2156 char *src = (char *)ioa_cfg->trace;
2157
2158 if (off > size)
2159 return 0;
2160 if (off + count > size) {
2161 size -= off;
2162 count = size;
2163 }
2164
2165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2166 memcpy(buf, &src[off], count);
2167 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2168 return count;
2169}
2170
2171static struct bin_attribute ipr_trace_attr = {
2172 .attr = {
2173 .name = "trace",
2174 .mode = S_IRUGO,
2175 },
2176 .size = 0,
2177 .read = ipr_read_trace,
2178};
2179#endif
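/*
 * Userspace sketch (illustrative only; the host number, and therefore
 * the sysfs path, is assigned at probe time): draining the binary
 * "trace" attribute exported above. Guarded with #if 0 so it is never
 * built as part of the driver.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);

	if (fd < 0)
		return 1;
	/* sysfs bin attributes are read in page-sized chunks */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
#endif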
2180
2181static const struct {
2182 enum ipr_cache_state state;
2183 char *name;
2184} cache_state [] = {
2185 { CACHE_NONE, "none" },
2186 { CACHE_DISABLED, "disabled" },
2187 { CACHE_ENABLED, "enabled" }
2188};
2189
2190/**
2191 * ipr_show_write_caching - Show the write caching attribute
2192 * @class_dev: class device struct
2193 * @buf: buffer
2194 *
2195 * Return value:
2196 * number of bytes printed to buffer
2197 **/
2198static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2199{
2200 struct Scsi_Host *shost = class_to_shost(class_dev);
2201 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2202 unsigned long lock_flags = 0;
2203 int i, len = 0;
2204
2205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2206 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2207 if (cache_state[i].state == ioa_cfg->cache_state) {
2208 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2209 break;
2210 }
2211 }
2212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2213 return len;
2214}
2215
2216
2217/**
2218 * ipr_store_write_caching - Enable/disable adapter write cache
2219 * @class_dev: class_device struct
2220 * @buf: buffer
2221 * @count: buffer size
2222 *
2223 * This function will enable/disable adapter write cache.
2224 *
2225 * Return value:
2226 * count on success / other on failure
2227 **/
2228static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2229 const char *buf, size_t count)
2230{
2231 struct Scsi_Host *shost = class_to_shost(class_dev);
2232 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2233 unsigned long lock_flags = 0;
2234 enum ipr_cache_state new_state = CACHE_INVALID;
2235 int i;
2236
2237 if (!capable(CAP_SYS_ADMIN))
2238 return -EACCES;
2239 if (ioa_cfg->cache_state == CACHE_NONE)
2240 return -EINVAL;
2241
2242 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2243 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2244 new_state = cache_state[i].state;
2245 break;
2246 }
2247 }
2248
2249 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2250 return -EINVAL;
2251
2252 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2253 if (ioa_cfg->cache_state == new_state) {
2254 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2255 return count;
2256 }
2257
2258 ioa_cfg->cache_state = new_state;
2259 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2260 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2261 if (!ioa_cfg->in_reset_reload)
2262 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2264 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2265
2266 return count;
2267}
2268
2269static struct class_device_attribute ipr_ioa_cache_attr = {
2270 .attr = {
2271 .name = "write_cache",
2272 .mode = S_IRUGO | S_IWUSR,
2273 },
2274 .show = ipr_show_write_caching,
2275 .store = ipr_store_write_caching
2276};
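/*
 * Usage sketch (path illustrative): the attribute above appears as
 * /sys/class/scsi_host/hostN/write_cache, reads back one of "none",
 * "disabled" or "enabled", and accepts "enabled" or "disabled" on
 * write, e.g.:
 *
 *   echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * A write that changes the state triggers a normal-shutdown adapter
 * reset and blocks until the reset/reload completes.
 */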
2277
2278/**
2279 * ipr_show_fw_version - Show the firmware version
2280 * @class_dev: class device struct
2281 * @buf: buffer
2282 *
2283 * Return value:
2284 * number of bytes printed to buffer
2285 **/
2286static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2287{
2288 struct Scsi_Host *shost = class_to_shost(class_dev);
2289 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2290 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2291 unsigned long lock_flags = 0;
2292 int len;
2293
2294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2295 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2296 ucode_vpd->major_release, ucode_vpd->card_type,
2297 ucode_vpd->minor_release[0],
2298 ucode_vpd->minor_release[1]);
2299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2300 return len;
2301}
2302
2303static struct class_device_attribute ipr_fw_version_attr = {
2304 .attr = {
2305 .name = "fw_version",
2306 .mode = S_IRUGO,
2307 },
2308 .show = ipr_show_fw_version,
2309};
2310
2311/**
2312 * ipr_show_log_level - Show the adapter's error logging level
2313 * @class_dev: class device struct
2314 * @buf: buffer
2315 *
2316 * Return value:
2317 * number of bytes printed to buffer
2318 **/
2319static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2320{
2321 struct Scsi_Host *shost = class_to_shost(class_dev);
2322 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2323 unsigned long lock_flags = 0;
2324 int len;
2325
2326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2327 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2329 return len;
2330}
2331
2332/**
2333 * ipr_store_log_level - Change the adapter's error logging level
2334 * @class_dev: class device struct
2335 * @buf: buffer
2336 *
2337 * Return value:
2338 * number of bytes consumed from buffer
2339 **/
2340static ssize_t ipr_store_log_level(struct class_device *class_dev,
2341 const char *buf, size_t count)
2342{
2343 struct Scsi_Host *shost = class_to_shost(class_dev);
2344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2345 unsigned long lock_flags = 0;
2346
2347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2348 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2350 return strlen(buf);
2351}
2352
2353static struct class_device_attribute ipr_log_level_attr = {
2354 .attr = {
2355 .name = "log_level",
2356 .mode = S_IRUGO | S_IWUSR,
2357 },
2358 .show = ipr_show_log_level,
2359 .store = ipr_store_log_level
2360};
2361
2362/**
2363 * ipr_store_diagnostics - IOA Diagnostics interface
2364 * @class_dev: class_device struct
2365 * @buf: buffer
2366 * @count: buffer size
2367 *
2368 * This function will reset the adapter and wait a reasonable
2369 * amount of time for any errors that the adapter might log.
2370 *
2371 * Return value:
2372 * count on success / other on failure
2373 **/
2374static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2375 const char *buf, size_t count)
2376{
2377 struct Scsi_Host *shost = class_to_shost(class_dev);
2378 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2379 unsigned long lock_flags = 0;
2380 int rc = count;
2381
2382 if (!capable(CAP_SYS_ADMIN))
2383 return -EACCES;
2384
2385 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2387 ioa_cfg->errors_logged = 0;
2388 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2389
2390 if (ioa_cfg->in_reset_reload) {
2391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2392 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2393
2394 /* Wait for a second for any errors to be logged */
2395 msleep(1000);
2396 } else {
2397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2398 return -EIO;
2399 }
2400
2401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2402 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2403 rc = -EIO;
2404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2405
2406 return rc;
2407}
2408
2409static struct class_device_attribute ipr_diagnostics_attr = {
2410 .attr = {
2411 .name = "run_diagnostics",
2412 .mode = S_IWUSR,
2413 },
2414 .store = ipr_store_diagnostics
2415};
2416
2417/**
2418 * ipr_show_adapter_state - Show the adapter's state
2419 * @class_dev: class device struct
2420 * @buf: buffer
2421 *
2422 * Return value:
2423 * number of bytes printed to buffer
2424 **/
2425static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2426{
2427 struct Scsi_Host *shost = class_to_shost(class_dev);
2428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2429 unsigned long lock_flags = 0;
2430 int len;
2431
2432 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2433 if (ioa_cfg->ioa_is_dead)
2434 len = snprintf(buf, PAGE_SIZE, "offline\n");
2435 else
2436 len = snprintf(buf, PAGE_SIZE, "online\n");
2437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2438 return len;
2439}
2440
2441/**
2442 * ipr_store_adapter_state - Change adapter state
2443 * @class_dev: class_device struct
2444 * @buf: buffer
2445 * @count: buffer size
2446 *
2447 * This function will change the adapter's state.
2448 *
2449 * Return value:
2450 * count on success / other on failure
2451 **/
2452static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2453 const char *buf, size_t count)
2454{
2455 struct Scsi_Host *shost = class_to_shost(class_dev);
2456 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2457 unsigned long lock_flags;
2458 int result = count;
2459
2460 if (!capable(CAP_SYS_ADMIN))
2461 return -EACCES;
2462
2463 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2464 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2465 ioa_cfg->ioa_is_dead = 0;
2466 ioa_cfg->reset_retries = 0;
2467 ioa_cfg->in_ioa_bringdown = 0;
2468 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2469 }
2470 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2471 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2472
2473 return result;
2474}
2475
2476static struct class_device_attribute ipr_ioa_state_attr = {
2477 .attr = {
2478 .name = "state",
2479 .mode = S_IRUGO | S_IWUSR,
2480 },
2481 .show = ipr_show_adapter_state,
2482 .store = ipr_store_adapter_state
2483};
2484
2485/**
2486 * ipr_store_reset_adapter - Reset the adapter
2487 * @class_dev: class_device struct
2488 * @buf: buffer
2489 * @count: buffer size
2490 *
2491 * This function will reset the adapter.
2492 *
2493 * Return value:
2494 * count on success / other on failure
2495 **/
2496static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2497 const char *buf, size_t count)
2498{
2499 struct Scsi_Host *shost = class_to_shost(class_dev);
2500 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2501 unsigned long lock_flags;
2502 int result = count;
2503
2504 if (!capable(CAP_SYS_ADMIN))
2505 return -EACCES;
2506
2507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2508 if (!ioa_cfg->in_reset_reload)
2509 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2511 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2512
2513 return result;
2514}
2515
2516static struct class_device_attribute ipr_ioa_reset_attr = {
2517 .attr = {
2518 .name = "reset_host",
2519 .mode = S_IWUSR,
2520 },
2521 .store = ipr_store_reset_adapter
2522};
2523
2524/**
2525 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2526 * @buf_len: buffer length
2527 *
2528 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2529 * list to use for microcode download
2530 *
2531 * Return value:
2532 * pointer to sglist / NULL on failure
2533 **/
2534static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2535{
2536 int sg_size, order, bsize_elem, num_elem, i, j;
2537 struct ipr_sglist *sglist;
2538 struct scatterlist *scatterlist;
2539 struct page *page;
2540
2541 /* Get the minimum size per scatter/gather element */
2542 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2543
2544 /* Get the actual size per element */
2545 order = get_order(sg_size);
2546
2547 /* Determine the actual number of bytes per element */
2548 bsize_elem = PAGE_SIZE * (1 << order);
2549
2550 /* Determine the actual number of sg entries needed */
2551 if (buf_len % bsize_elem)
2552 num_elem = (buf_len / bsize_elem) + 1;
2553 else
2554 num_elem = buf_len / bsize_elem;
2555
2556 /* Allocate a scatter/gather list for the DMA */
2557	sglist = kzalloc(sizeof(struct ipr_sglist) +
2558 (sizeof(struct scatterlist) * (num_elem - 1)),
2559 GFP_KERNEL);
2560
2561 if (sglist == NULL) {
2562 ipr_trace;
2563 return NULL;
2564 }
2565
2566 scatterlist = sglist->scatterlist;
2567
2568 sglist->order = order;
2569 sglist->num_sg = num_elem;
2570
2571 /* Allocate a bunch of sg elements */
2572 for (i = 0; i < num_elem; i++) {
2573 page = alloc_pages(GFP_KERNEL, order);
2574 if (!page) {
2575 ipr_trace;
2576
2577 /* Free up what we already allocated */
2578 for (j = i - 1; j >= 0; j--)
2579 __free_pages(scatterlist[j].page, order);
2580 kfree(sglist);
2581 return NULL;
2582 }
2583
2584 scatterlist[i].page = page;
2585 }
2586
2587 return sglist;
2588}
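/*
 * Worked example of the sizing logic above (hypothetical values,
 * assuming PAGE_SIZE == 4096 and IPR_MAX_SGLIST == 64): for a buf_len
 * of 614400 bytes (600 KB):
 *
 *   sg_size    = 614400 / 63     = 9752 bytes minimum per element
 *   order      = get_order(9752) = 2 (four pages per element)
 *   bsize_elem = 4096 << 2       = 16384 bytes per element
 *   num_elem   = 37 full elements + 1 partial = 38
 *
 * so 38 order-2 page allocations back the scatter/gather list.
 */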
2589
2590/**
2591 * ipr_free_ucode_buffer - Frees a microcode download buffer
2592 * @sglist: scatter/gather list pointer
2593 *
2594 * Free a DMA'able ucode download buffer previously allocated with
2595 * ipr_alloc_ucode_buffer
2596 *
2597 * Return value:
2598 * nothing
2599 **/
2600static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2601{
2602 int i;
2603
2604 for (i = 0; i < sglist->num_sg; i++)
2605 __free_pages(sglist->scatterlist[i].page, sglist->order);
2606
2607 kfree(sglist);
2608}
2609
2610/**
2611 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2612 * @sglist: scatter/gather list pointer
2613 * @buffer: buffer pointer
2614 * @len: buffer length
2615 *
2616 * Copy a microcode image from a user buffer into a buffer allocated by
2617 * ipr_alloc_ucode_buffer
2618 *
2619 * Return value:
2620 * 0 on success / other on failure
2621 **/
2622static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2623 u8 *buffer, u32 len)
2624{
2625 int bsize_elem, i, result = 0;
2626 struct scatterlist *scatterlist;
2627 void *kaddr;
2628
2629 /* Determine the actual number of bytes per element */
2630 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2631
2632 scatterlist = sglist->scatterlist;
2633
2634 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2635 kaddr = kmap(scatterlist[i].page);
2636 memcpy(kaddr, buffer, bsize_elem);
2637 kunmap(scatterlist[i].page);
2638
2639 scatterlist[i].length = bsize_elem;
2640
2641 if (result != 0) {
2642 ipr_trace;
2643 return result;
2644 }
2645 }
2646
2647 if (len % bsize_elem) {
2648 kaddr = kmap(scatterlist[i].page);
2649 memcpy(kaddr, buffer, len % bsize_elem);
2650 kunmap(scatterlist[i].page);
2651
2652 scatterlist[i].length = len % bsize_elem;
2653 }
2654
2655 sglist->buffer_len = len;
2656 return result;
2657}
2658
2659/**
2660 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2661 * @ipr_cmd: ipr command struct
2662 * @sglist: scatter/gather list
2663 *
2664 * Builds a microcode download IOA data list (IOADL).
2665 *
2666 **/
2667static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2668 struct ipr_sglist *sglist)
2669{
2670 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2671 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2672 struct scatterlist *scatterlist = sglist->scatterlist;
2673 int i;
2674
2675	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2676	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2677	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2678 ioarcb->write_ioadl_len =
2679 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2680
2681 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2682 ioadl[i].flags_and_data_len =
2683 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2684 ioadl[i].address =
2685 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2686 }
2687
2688 ioadl[i-1].flags_and_data_len |=
2689 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2690}
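/*
 * Illustrative IOADL built above for a two-element download
 * (hypothetical DMA lengths): every descriptor carries the WRITE flag
 * plus its mapped length, and the final one is additionally tagged as
 * the last descriptor in the list:
 *
 *   ioadl[0].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE | 16384)
 *   ioadl[1].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE | 8192
 *                                             | IPR_IOADL_FLAGS_LAST)
 */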
2691
2692/**
2693 * ipr_update_ioa_ucode - Update IOA's microcode
2694 * @ioa_cfg: ioa config struct
2695 * @sglist: scatter/gather list
2696 *
2697 * Initiate an adapter reset to update the IOA's microcode
2698 *
2699 * Return value:
2700 * 0 on success / -EIO on failure
2701 **/
2702static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2703 struct ipr_sglist *sglist)
2704{
2705 unsigned long lock_flags;
2706
2707 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2708
2709 if (ioa_cfg->ucode_sglist) {
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711 dev_err(&ioa_cfg->pdev->dev,
2712 "Microcode download already in progress\n");
2713 return -EIO;
2714	}
2715
2716 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2717 sglist->num_sg, DMA_TO_DEVICE);
2718
2719 if (!sglist->num_dma_sg) {
2720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 dev_err(&ioa_cfg->pdev->dev,
2722 "Failed to map microcode download buffer!\n");
2723 return -EIO;
2724 }
2725
2726 ioa_cfg->ucode_sglist = sglist;
2727 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2729 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2730
2731 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2732 ioa_cfg->ucode_sglist = NULL;
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734 return 0;
2735}
2736
2737/**
2738 * ipr_store_update_fw - Update the firmware on the adapter
2739 * @class_dev: class_device struct
2740 * @buf: buffer
2741 * @count: buffer size
2742 *
2743 * This function will update the firmware on the adapter.
2744 *
2745 * Return value:
2746 * count on success / other on failure
2747 **/
2748static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2749 const char *buf, size_t count)
2750{
2751 struct Scsi_Host *shost = class_to_shost(class_dev);
2752 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2753 struct ipr_ucode_image_header *image_hdr;
2754 const struct firmware *fw_entry;
2755 struct ipr_sglist *sglist;
2756 char fname[100];
2757 char *src;
2758 int len, result, dnld_size;
2759
2760 if (!capable(CAP_SYS_ADMIN))
2761 return -EACCES;
2762
2763 len = snprintf(fname, 99, "%s", buf);
2764 fname[len-1] = '\0';
2765
2766 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2767 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2768 return -EIO;
2769 }
2770
2771 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2772
2773 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2774 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2775 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2776 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2777 release_firmware(fw_entry);
2778 return -EINVAL;
2779 }
2780
2781 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2782 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2783 sglist = ipr_alloc_ucode_buffer(dnld_size);
2784
2785 if (!sglist) {
2786 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2787 release_firmware(fw_entry);
2788 return -ENOMEM;
2789 }
2790
2791 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2792
2793 if (result) {
2794 dev_err(&ioa_cfg->pdev->dev,
2795 "Microcode buffer copy to DMA buffer failed\n");
2796		goto out;
2797 }
2798
2799	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2800
2801 if (!result)
2802 result = count;
2803out:
2804 ipr_free_ucode_buffer(sglist);
2805 release_firmware(fw_entry);
2806	return result;
2807}
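/*
 * Microcode image layout consumed above, as implied by the header
 * checks: a struct ipr_ucode_image_header of header_length bytes,
 * followed by the raw download data, so
 *
 *   src       = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
 *   dnld_size = fw_entry->size  - be32_to_cpu(image_hdr->header_length);
 *
 * The download is typically triggered from userspace with something
 * like (path and file name illustrative):
 *
 *   echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */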
2808
2809static struct class_device_attribute ipr_update_fw_attr = {
2810 .attr = {
2811 .name = "update_fw",
2812 .mode = S_IWUSR,
2813 },
2814 .store = ipr_store_update_fw
2815};
2816
2817static struct class_device_attribute *ipr_ioa_attrs[] = {
2818 &ipr_fw_version_attr,
2819 &ipr_log_level_attr,
2820 &ipr_diagnostics_attr,
2821	&ipr_ioa_state_attr,
2822 &ipr_ioa_reset_attr,
2823 &ipr_update_fw_attr,
2824	&ipr_ioa_cache_attr,
2825 NULL,
2826};
2827
2828#ifdef CONFIG_SCSI_IPR_DUMP
2829/**
2830 * ipr_read_dump - Dump the adapter
2831 * @kobj: kobject struct
2832 * @buf: buffer
2833 * @off: offset
2834 * @count: buffer size
2835 *
2836 * Return value:
2837 * number of bytes printed to buffer
2838 **/
2839static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2840 loff_t off, size_t count)
2841{
2842 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2843 struct Scsi_Host *shost = class_to_shost(cdev);
2844 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2845 struct ipr_dump *dump;
2846 unsigned long lock_flags = 0;
2847 char *src;
2848 int len;
2849 size_t rc = count;
2850
2851 if (!capable(CAP_SYS_ADMIN))
2852 return -EACCES;
2853
2854 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2855 dump = ioa_cfg->dump;
2856
2857 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2859 return 0;
2860 }
2861 kref_get(&dump->kref);
2862 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2863
2864 if (off > dump->driver_dump.hdr.len) {
2865 kref_put(&dump->kref, ipr_release_dump);
2866 return 0;
2867 }
2868
2869 if (off + count > dump->driver_dump.hdr.len) {
2870 count = dump->driver_dump.hdr.len - off;
2871 rc = count;
2872 }
2873
2874 if (count && off < sizeof(dump->driver_dump)) {
2875 if (off + count > sizeof(dump->driver_dump))
2876 len = sizeof(dump->driver_dump) - off;
2877 else
2878 len = count;
2879 src = (u8 *)&dump->driver_dump + off;
2880 memcpy(buf, src, len);
2881 buf += len;
2882 off += len;
2883 count -= len;
2884 }
2885
2886 off -= sizeof(dump->driver_dump);
2887
2888 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2889 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2890 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2891 else
2892 len = count;
2893 src = (u8 *)&dump->ioa_dump + off;
2894 memcpy(buf, src, len);
2895 buf += len;
2896 off += len;
2897 count -= len;
2898 }
2899
2900 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2901
2902 while (count) {
2903 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2904 len = PAGE_ALIGN(off) - off;
2905 else
2906 len = count;
2907 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2908 src += off & ~PAGE_MASK;
2909 memcpy(buf, src, len);
2910 buf += len;
2911 off += len;
2912 count -= len;
2913 }
2914
2915 kref_put(&dump->kref, ipr_release_dump);
2916 return rc;
2917}
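/*
 * Offset windowing in the read path above, summarized: with
 * D = sizeof(dump->driver_dump) and H = offsetof(struct ipr_ioa_dump,
 * ioa_data), a read at offset off is satisfied from
 *
 *   off <  D          the driver dump header and driver entries
 *   D <= off < D + H  the IOA dump entry header
 *   off >= D + H      the ioa_data pages, starting at page
 *                     (off - D - H) >> PAGE_SHIFT
 *
 * and a single read may span all three regions.
 */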
2918
2919/**
2920 * ipr_alloc_dump - Prepare for adapter dump
2921 * @ioa_cfg: ioa config struct
2922 *
2923 * Return value:
2924 * 0 on success / other on failure
2925 **/
2926static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2927{
2928 struct ipr_dump *dump;
2929 unsigned long lock_flags = 0;
2930
2931 ENTER;
2932	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2933
2934 if (!dump) {
2935 ipr_err("Dump memory allocation failed\n");
2936 return -ENOMEM;
2937 }
2938
2939 kref_init(&dump->kref);
2940 dump->ioa_cfg = ioa_cfg;
2941
2942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2943
2944 if (INACTIVE != ioa_cfg->sdt_state) {
2945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2946 kfree(dump);
2947 return 0;
2948 }
2949
2950 ioa_cfg->dump = dump;
2951 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2952 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2953 ioa_cfg->dump_taken = 1;
2954 schedule_work(&ioa_cfg->work_q);
2955 }
2956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2957
2958 LEAVE;
2959 return 0;
2960}
2961
2962/**
2963 * ipr_free_dump - Free adapter dump memory
2964 * @ioa_cfg: ioa config struct
2965 *
2966 * Return value:
2967 * 0 on success / other on failure
2968 **/
2969static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2970{
2971 struct ipr_dump *dump;
2972 unsigned long lock_flags = 0;
2973
2974 ENTER;
2975
2976 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2977 dump = ioa_cfg->dump;
2978 if (!dump) {
2979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2980 return 0;
2981 }
2982
2983 ioa_cfg->dump = NULL;
2984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2985
2986 kref_put(&dump->kref, ipr_release_dump);
2987
2988 LEAVE;
2989 return 0;
2990}
2991
2992/**
2993 * ipr_write_dump - Setup dump state of adapter
2994 * @kobj: kobject struct
2995 * @buf: buffer
2996 * @off: offset
2997 * @count: buffer size
2998 *
2999 * Return value:
3000 * number of bytes printed to buffer
3001 **/
3002static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3003 loff_t off, size_t count)
3004{
3005 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3006 struct Scsi_Host *shost = class_to_shost(cdev);
3007 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3008 int rc;
3009
3010 if (!capable(CAP_SYS_ADMIN))
3011 return -EACCES;
3012
3013 if (buf[0] == '1')
3014 rc = ipr_alloc_dump(ioa_cfg);
3015 else if (buf[0] == '0')
3016 rc = ipr_free_dump(ioa_cfg);
3017 else
3018 return -EINVAL;
3019
3020 if (rc)
3021 return rc;
3022 else
3023 return count;
3024}
3025
3026static struct bin_attribute ipr_dump_attr = {
3027 .attr = {
3028 .name = "dump",
3029 .mode = S_IRUSR | S_IWUSR,
3030 },
3031 .size = 0,
3032 .read = ipr_read_dump,
3033 .write = ipr_write_dump
3034};
3035#else
3036static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3037#endif
3038
3039/**
3040 * ipr_change_queue_depth - Change the device's queue depth
3041 * @sdev: scsi device struct
3042 * @qdepth: depth to set
3043 *
3044 * Return value:
3045 * actual depth set
3046 **/
3047static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3048{
3049 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3050 return sdev->queue_depth;
3051}
3052
3053/**
3054 * ipr_change_queue_type - Change the device's queue type
3055 * @sdev: scsi device struct
3056 * @tag_type: type of tags to use
3057 *
3058 * Return value:
3059 * actual queue type set
3060 **/
3061static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3062{
3063 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3064 struct ipr_resource_entry *res;
3065 unsigned long lock_flags = 0;
3066
3067 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068 res = (struct ipr_resource_entry *)sdev->hostdata;
3069
3070 if (res) {
3071 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3072 /*
3073 * We don't bother quiescing the device here since the
3074 * adapter firmware does it for us.
3075 */
3076 scsi_set_tag_type(sdev, tag_type);
3077
3078 if (tag_type)
3079 scsi_activate_tcq(sdev, sdev->queue_depth);
3080 else
3081 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3082 } else
3083 tag_type = 0;
3084 } else
3085 tag_type = 0;
3086
3087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3088 return tag_type;
3089}
3090
3091/**
3092 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3093 * @dev: device struct
3094 * @buf: buffer
3095 *
3096 * Return value:
3097 * number of bytes printed to buffer
3098 **/
3099static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3100{
3101 struct scsi_device *sdev = to_scsi_device(dev);
3102 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3103 struct ipr_resource_entry *res;
3104 unsigned long lock_flags = 0;
3105 ssize_t len = -ENXIO;
3106
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3108 res = (struct ipr_resource_entry *)sdev->hostdata;
3109 if (res)
3110 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3112 return len;
3113}
3114
3115static struct device_attribute ipr_adapter_handle_attr = {
3116 .attr = {
3117 .name = "adapter_handle",
3118 .mode = S_IRUSR,
3119 },
3120 .show = ipr_show_adapter_handle
3121};
3122
3123static struct device_attribute *ipr_dev_attrs[] = {
3124 &ipr_adapter_handle_attr,
3125 NULL,
3126};
3127
3128/**
3129 * ipr_biosparam - Return the HSC mapping
3130 * @sdev: scsi device struct
3131 * @block_device: block device pointer
3132 * @capacity: capacity of the device
3133 * @parm: Array containing returned HSC values.
3134 *
3135 * This function generates the HSC parms that fdisk uses.
3136 * We want to make sure we return something that places partitions
3137 * on 4k boundaries for best performance with the IOA.
3138 *
3139 * Return value:
3140 * 0 on success
3141 **/
3142static int ipr_biosparam(struct scsi_device *sdev,
3143 struct block_device *block_device,
3144 sector_t capacity, int *parm)
3145{
3146 int heads, sectors;
3147 sector_t cylinders;
3148
3149 heads = 128;
3150 sectors = 32;
3151
3152 cylinders = capacity;
3153 sector_div(cylinders, (128 * 32));
3154
3155 /* return result */
3156 parm[0] = heads;
3157 parm[1] = sectors;
3158 parm[2] = cylinders;
3159
3160 return 0;
3161}
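/*
 * Worked example (hypothetical capacity): a disk of 71,096,640 sectors
 * reports 128 heads and 32 sectors/track, giving
 *
 *   cylinders = 71096640 / (128 * 32) = 17357
 *
 * Each cylinder is 128 * 32 = 4096 sectors (2 MiB), so cylinder-aligned
 * partitions always start on 4k boundaries, as the comment above
 * intends.
 */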
3162
3163/**
3164 * ipr_slave_destroy - Unconfigure a SCSI device
3165 * @sdev: scsi device struct
3166 *
3167 * Return value:
3168 * nothing
3169 **/
3170static void ipr_slave_destroy(struct scsi_device *sdev)
3171{
3172 struct ipr_resource_entry *res;
3173 struct ipr_ioa_cfg *ioa_cfg;
3174 unsigned long lock_flags = 0;
3175
3176 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3177
3178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 res = (struct ipr_resource_entry *) sdev->hostdata;
3180 if (res) {
3181 sdev->hostdata = NULL;
3182 res->sdev = NULL;
3183 }
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3185}
3186
3187/**
3188 * ipr_slave_configure - Configure a SCSI device
3189 * @sdev: scsi device struct
3190 *
3191 * This function configures the specified scsi device.
3192 *
3193 * Return value:
3194 * 0 on success
3195 **/
3196static int ipr_slave_configure(struct scsi_device *sdev)
3197{
3198 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3199 struct ipr_resource_entry *res;
3200 unsigned long lock_flags = 0;
3201
3202 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3203 res = sdev->hostdata;
3204 if (res) {
3205 if (ipr_is_af_dasd_device(res))
3206 sdev->type = TYPE_RAID;
3207		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3208			sdev->scsi_level = 4;
3209 sdev->no_uld_attach = 1;
3210 }
3211 if (ipr_is_vset_device(res)) {
3212 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3213 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3214 }
3215 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
3216 sdev->allow_restart = 1;
3217 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3218 }
3219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3220 return 0;
3221}
3222
3223/**
3224 * ipr_slave_alloc - Prepare for commands to a device.
3225 * @sdev: scsi device struct
3226 *
3227 * This function saves a pointer to the resource entry
3228 * in the scsi device struct if the device exists. We
3229 * can then use this pointer in ipr_queuecommand when
3230 * handling new commands.
3231 *
3232 * Return value:
3233 * 0 on success / -ENXIO if device does not exist
3234 **/
3235static int ipr_slave_alloc(struct scsi_device *sdev)
3236{
3237 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3238 struct ipr_resource_entry *res;
3239 unsigned long lock_flags;
3240	int rc = -ENXIO;
3241
3242 sdev->hostdata = NULL;
3243
3244 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3245
3246 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3247 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3248 (res->cfgte.res_addr.target == sdev->id) &&
3249 (res->cfgte.res_addr.lun == sdev->lun)) {
3250 res->sdev = sdev;
3251 res->add_to_ml = 0;
3252 res->in_erp = 0;
3253 sdev->hostdata = res;
3254 res->needs_sync_complete = 1;
3255			rc = 0;
3256 break;
3257 }
3258 }
3259
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261
3262	return rc;
3263}
3264
3265/**
3266 * ipr_eh_host_reset - Reset the host adapter
3267 * @scsi_cmd: scsi command struct
3268 *
3269 * Return value:
3270 * SUCCESS / FAILED
3271 **/
3272static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3273{
3274 struct ipr_ioa_cfg *ioa_cfg;
3275 int rc;
3276
3277 ENTER;
3278 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3279
3280 dev_err(&ioa_cfg->pdev->dev,
3281 "Adapter being reset as a result of error recovery.\n");
3282
3283 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3284 ioa_cfg->sdt_state = GET_DUMP;
3285
3286 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3287
3288 LEAVE;
3289 return rc;
3290}
3291
3292static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3293{
3294 int rc;
3295
3296 spin_lock_irq(cmd->device->host->host_lock);
3297 rc = __ipr_eh_host_reset(cmd);
3298 spin_unlock_irq(cmd->device->host->host_lock);
3299
3300 return rc;
3301}
3302
3303/**
3304 * ipr_eh_dev_reset - Reset the device
3305 * @scsi_cmd: scsi command struct
3306 *
3307 * This function issues a device reset to the affected device.
3308 * A LUN reset will be sent to the device first. If that does
3309 * not work, a target reset will be sent.
3310 *
3311 * Return value:
3312 * SUCCESS / FAILED
3313 **/
3314static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3315{
3316 struct ipr_cmnd *ipr_cmd;
3317 struct ipr_ioa_cfg *ioa_cfg;
3318 struct ipr_resource_entry *res;
3319 struct ipr_cmd_pkt *cmd_pkt;
3320 u32 ioasc;
3321
3322 ENTER;
3323 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3324 res = scsi_cmd->device->hostdata;
3325
3326 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3327 return FAILED;
3328
3329 /*
3330 * If we are currently going through reset/reload, return failed. This will force the
3331 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3332 * reset to complete
3333 */
3334 if (ioa_cfg->in_reset_reload)
3335 return FAILED;
3336 if (ioa_cfg->ioa_is_dead)
3337 return FAILED;
3338
3339 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3340 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3341 if (ipr_cmd->scsi_cmd)
3342 ipr_cmd->done = ipr_scsi_eh_done;
3343 }
3344 }
3345
3346 res->resetting_device = 1;
3347
3348 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3349
3350 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3351 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3352 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3353 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3354
3355 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3356 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3357
3358 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3359
3360 res->resetting_device = 0;
3361
3362 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3363
3364 LEAVE;
3365 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3366}
3367
3368static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3369{
3370 int rc;
3371
3372 spin_lock_irq(cmd->device->host->host_lock);
3373 rc = __ipr_eh_dev_reset(cmd);
3374 spin_unlock_irq(cmd->device->host->host_lock);
3375
3376 return rc;
3377}
3378
3379/**
3380 * ipr_bus_reset_done - Op done function for bus reset.
3381 * @ipr_cmd: ipr command struct
3382 *
3383 * This function is the op done function for a bus reset
3384 *
3385 * Return value:
3386 * none
3387 **/
3388static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3389{
3390 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3391 struct ipr_resource_entry *res;
3392
3393 ENTER;
3394 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3395 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3396 sizeof(res->cfgte.res_handle))) {
3397 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3398 break;
3399 }
3400 }
3401
3402 /*
3403 * If abort has not completed, indicate the reset has, else call the
3404 * abort's done function to wake the sleeping eh thread
3405 */
3406 if (ipr_cmd->sibling->sibling)
3407 ipr_cmd->sibling->sibling = NULL;
3408 else
3409 ipr_cmd->sibling->done(ipr_cmd->sibling);
3410
3411 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3412 LEAVE;
3413}
3414
3415/**
3416 * ipr_abort_timeout - An abort task has timed out
3417 * @ipr_cmd: ipr command struct
3418 *
3419 * This function handles the case where an abort task times out. If this
3420 * happens we issue a bus reset since we have resources tied
3421 * up that must be freed before returning to the midlayer.
3422 *
3423 * Return value:
3424 * none
3425 **/
3426static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3427{
3428 struct ipr_cmnd *reset_cmd;
3429 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3430 struct ipr_cmd_pkt *cmd_pkt;
3431 unsigned long lock_flags = 0;
3432
3433 ENTER;
3434 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3435 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3437 return;
3438 }
3439
3440 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3441 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3442 ipr_cmd->sibling = reset_cmd;
3443 reset_cmd->sibling = ipr_cmd;
3444 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3445 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3446 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3447 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3448 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3449
3450 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3451 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3452 LEAVE;
3453}
3454
3455/**
3456 * ipr_cancel_op - Cancel specified op
3457 * @scsi_cmd: scsi command struct
3458 *
3459 * This function cancels the specified op.
3460 *
3461 * Return value:
3462 * SUCCESS / FAILED
3463 **/
3464static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3465{
3466 struct ipr_cmnd *ipr_cmd;
3467 struct ipr_ioa_cfg *ioa_cfg;
3468 struct ipr_resource_entry *res;
3469 struct ipr_cmd_pkt *cmd_pkt;
3470 u32 ioasc;
3471 int op_found = 0;
3472
3473 ENTER;
3474 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3475 res = scsi_cmd->device->hostdata;
3476
3477 /* If we are currently going through reset/reload, return failed.
3478 * This will force the mid-layer to call ipr_eh_host_reset,
3479 * which will then go to sleep and wait for the reset to complete
3480 */
3481 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3482 return FAILED;
3483 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3484 return FAILED;
3485
3486 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3487 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3488 ipr_cmd->done = ipr_scsi_eh_done;
3489 op_found = 1;
3490 break;
3491 }
3492 }
3493
3494 if (!op_found)
3495 return SUCCESS;
3496
3497 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3498 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3499 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3500 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3501 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3502 ipr_cmd->u.sdev = scsi_cmd->device;
3503
3504 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3505 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3506 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3507
3508 /*
3509 * If the abort task timed out and we sent a bus reset, we will get
3510	 * one of the following responses to the abort
3511 */
3512 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3513 ioasc = 0;
3514 ipr_trace;
3515 }
3516
3517 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3518 res->needs_sync_complete = 1;
3519
3520 LEAVE;
3521 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3522}
3523
3524/**
3525 * ipr_eh_abort - Abort a single op
3526 * @scsi_cmd: scsi command struct
3527 *
3528 * Return value:
3529 * SUCCESS / FAILED
3530 **/
3531static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3532{
3533 unsigned long flags;
3534 int rc;
3535
3536 ENTER;
3537
3538 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3539 rc = ipr_cancel_op(scsi_cmd);
3540 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3541
3542 LEAVE;
3543	return rc;
3544}
3545
3546/**
3547 * ipr_handle_other_interrupt - Handle "other" interrupts
3548 * @ioa_cfg: ioa config struct
3549 * @int_reg: interrupt register
3550 *
3551 * Return value:
3552 * IRQ_NONE / IRQ_HANDLED
3553 **/
3554static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3555 volatile u32 int_reg)
3556{
3557 irqreturn_t rc = IRQ_HANDLED;
3558
3559 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3560 /* Mask the interrupt */
3561 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3562
3563 /* Clear the interrupt */
3564 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3565 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3566
3567 list_del(&ioa_cfg->reset_cmd->queue);
3568 del_timer(&ioa_cfg->reset_cmd->timer);
3569 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3570 } else {
3571 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3572 ioa_cfg->ioa_unit_checked = 1;
3573 else
3574 dev_err(&ioa_cfg->pdev->dev,
3575 "Permanent IOA failure. 0x%08X\n", int_reg);
3576
3577 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3578 ioa_cfg->sdt_state = GET_DUMP;
3579
3580 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3581 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3582 }
3583
3584 return rc;
3585}
3586
3587/**
3588 * ipr_isr - Interrupt service routine
3589 * @irq: irq number
3590 * @devp: pointer to ioa config struct
3591 * @regs: pt_regs struct
3592 *
3593 * Return value:
3594 * IRQ_NONE / IRQ_HANDLED
3595 **/
3596static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3597{
3598 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3599 unsigned long lock_flags = 0;
3600 volatile u32 int_reg, int_mask_reg;
3601 u32 ioasc;
3602 u16 cmd_index;
3603 struct ipr_cmnd *ipr_cmd;
3604 irqreturn_t rc = IRQ_NONE;
3605
3606 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3607
3608 /* If interrupts are disabled, ignore the interrupt */
3609 if (!ioa_cfg->allow_interrupts) {
3610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611 return IRQ_NONE;
3612 }
3613
3614 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3615 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3616
3617 /* If an interrupt on the adapter did not occur, ignore it */
3618 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3619 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3620 return IRQ_NONE;
3621 }
3622
3623 while (1) {
3624 ipr_cmd = NULL;
3625
3626 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3627 ioa_cfg->toggle_bit) {
3628
3629 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3630 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3631
3632 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3633 ioa_cfg->errors_logged++;
3634 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3635
3636 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3637 ioa_cfg->sdt_state = GET_DUMP;
3638
3639 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3640 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3641 return IRQ_HANDLED;
3642 }
3643
3644 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3645
3646 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3647
3648 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3649
3650 list_del(&ipr_cmd->queue);
3651 del_timer(&ipr_cmd->timer);
3652 ipr_cmd->done(ipr_cmd);
3653
3654 rc = IRQ_HANDLED;
3655
3656 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3657 ioa_cfg->hrrq_curr++;
3658 } else {
3659 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3660 ioa_cfg->toggle_bit ^= 1u;
3661 }
3662 }
3663
3664 if (ipr_cmd != NULL) {
3665 /* Clear the PCI interrupt */
3666 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3667 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3668 } else
3669 break;
3670 }
3671
3672 if (unlikely(rc == IRQ_NONE))
3673 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3674
3675 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3676 return rc;
3677}
3678
3679/**
3680 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3681 * @ioa_cfg: ioa config struct
3682 * @ipr_cmd: ipr command struct
3683 *
3684 * Return value:
3685 * 0 on success / -1 on failure
3686 **/
3687static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3688 struct ipr_cmnd *ipr_cmd)
3689{
3690 int i;
3691 struct scatterlist *sglist;
3692 u32 length;
3693 u32 ioadl_flags = 0;
3694 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3695 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3696 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3697
3698 length = scsi_cmd->request_bufflen;
3699
3700 if (length == 0)
3701 return 0;
3702
3703 if (scsi_cmd->use_sg) {
3704 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3705 scsi_cmd->request_buffer,
3706 scsi_cmd->use_sg,
3707 scsi_cmd->sc_data_direction);
3708
3709 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3710 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3711 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3712 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3713 ioarcb->write_ioadl_len =
3714 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3715 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3716 ioadl_flags = IPR_IOADL_FLAGS_READ;
3717 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3718 ioarcb->read_ioadl_len =
3719 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3720 }
3721
3722 sglist = scsi_cmd->request_buffer;
3723
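		/*
		 * Each IOADL descriptor carries the direction flags OR'ed
		 * with the segment length in one big-endian word, followed
		 * by the 32-bit DMA address; the last descriptor is tagged
		 * with IPR_IOADL_FLAGS_LAST below so the adapter knows
		 * where the list ends.
		 */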
3724 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3725 ioadl[i].flags_and_data_len =
3726 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3727 ioadl[i].address =
3728 cpu_to_be32(sg_dma_address(&sglist[i]));
3729 }
3730
3731 if (likely(ipr_cmd->dma_use_sg)) {
3732 ioadl[i-1].flags_and_data_len |=
3733 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3734 return 0;
3735 } else
3736 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3737 } else {
3738 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3739 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3740 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3741 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3742 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3743 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3744 ioadl_flags = IPR_IOADL_FLAGS_READ;
3745 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3746 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3747 }
3748
3749 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3750 scsi_cmd->request_buffer, length,
3751 scsi_cmd->sc_data_direction);
3752
3753 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3754 ipr_cmd->dma_use_sg = 1;
3755 ioadl[0].flags_and_data_len =
3756 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3757 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3758 return 0;
3759 } else
3760 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3761 }
3762
3763 return -1;
3764}
3765
3766/**
3767 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3768 * @scsi_cmd: scsi command struct
3769 *
3770 * Return value:
3771 * task attributes
3772 **/
3773static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3774{
3775 u8 tag[2];
3776 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3777
3778 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3779 switch (tag[0]) {
3780 case MSG_SIMPLE_TAG:
3781 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3782 break;
3783 case MSG_HEAD_TAG:
3784 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3785 break;
3786 case MSG_ORDERED_TAG:
3787 rc = IPR_FLAGS_LO_ORDERED_TASK;
3788 break;
3789 		}
3790 }
3791
3792 return rc;
3793}
3794
3795/**
3796 * ipr_erp_done - Process completion of ERP for a device
3797 * @ipr_cmd: ipr command struct
3798 *
3799 * This function copies the sense buffer into the scsi_cmd
3800 * struct and pushes the scsi_done function.
3801 *
3802 * Return value:
3803 * nothing
3804 **/
3805static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3806{
3807 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3808 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3809 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3810 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3811
3812 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3813 scsi_cmd->result |= (DID_ERROR << 16);
3814 ipr_sdev_err(scsi_cmd->device,
3815 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3816 } else {
3817 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3818 SCSI_SENSE_BUFFERSIZE);
3819 }
3820
3821 if (res) {
3822 res->needs_sync_complete = 1;
3823 res->in_erp = 0;
3824 }
3825 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3826 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3827 scsi_cmd->scsi_done(scsi_cmd);
3828}
3829
3830/**
3831 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3832 * @ipr_cmd: ipr command struct
3833 *
3834 * Return value:
3835 * none
3836 **/
3837static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3838{
3839 struct ipr_ioarcb *ioarcb;
3840 struct ipr_ioasa *ioasa;
3841
3842 ioarcb = &ipr_cmd->ioarcb;
3843 ioasa = &ipr_cmd->ioasa;
3844
3845 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3846 ioarcb->write_data_transfer_length = 0;
3847 ioarcb->read_data_transfer_length = 0;
3848 ioarcb->write_ioadl_len = 0;
3849 ioarcb->read_ioadl_len = 0;
3850 ioasa->ioasc = 0;
3851 ioasa->residual_data_len = 0;
3852}
3853
3854/**
3855 * ipr_erp_request_sense - Send request sense to a device
3856 * @ipr_cmd: ipr command struct
3857 *
3858 * This function sends a request sense to a device as a result
3859 * of a check condition.
3860 *
3861 * Return value:
3862 * nothing
3863 **/
3864static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3865{
3866 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3867 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3868
3869 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3870 ipr_erp_done(ipr_cmd);
3871 return;
3872 }
3873
3874 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3875
3876 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3877 cmd_pkt->cdb[0] = REQUEST_SENSE;
3878 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3879 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3880 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3881 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3882
3883 ipr_cmd->ioadl[0].flags_and_data_len =
3884 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3885 ipr_cmd->ioadl[0].address =
3886 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3887
3888 ipr_cmd->ioarcb.read_ioadl_len =
3889 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3890 ipr_cmd->ioarcb.read_data_transfer_length =
3891 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3892
3893 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3894 IPR_REQUEST_SENSE_TIMEOUT * 2);
3895}
3896
3897/**
3898 * ipr_erp_cancel_all - Send cancel all to a device
3899 * @ipr_cmd: ipr command struct
3900 *
3901 * This function sends a cancel all to a device to clear the
3902 * queue. If we are running TCQ on the device, QERR is set to 1,
3903 * which means all outstanding ops have been dropped on the floor.
3904 * Cancel all will return them to us.
3905 *
3906 * Return value:
3907 * nothing
3908 **/
3909static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3910{
3911 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3912 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3913 struct ipr_cmd_pkt *cmd_pkt;
3914
3915 res->in_erp = 1;
3916
3917 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3918
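	/*
	 * An untagged device has nothing queued to cancel, so skip the
	 * cancel all and go straight to the request sense step.
	 */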
3919 if (!scsi_get_tag_type(scsi_cmd->device)) {
3920 ipr_erp_request_sense(ipr_cmd);
3921 return;
3922 }
3923
3924 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3925 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3926 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3927
3928 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3929 IPR_CANCEL_ALL_TIMEOUT);
3930}
3931
3932/**
3933 * ipr_dump_ioasa - Dump contents of IOASA
3934 * @ioa_cfg: ioa config struct
3935 * @ipr_cmd: ipr command struct
3936 *
3937 * This function is invoked by the interrupt handler when ops
3938 * fail. It will log the IOASA if appropriate. Only called
3939 * for GPDD ops.
3940 *
3941 * Return value:
3942 * none
3943 **/
3944static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3945 struct ipr_cmnd *ipr_cmd)
3946{
3947 int i;
3948 u16 data_len;
3949 u32 ioasc;
3950 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3951 __be32 *ioasa_data = (__be32 *)ioasa;
3952 int error_index;
3953
3954 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3955
3956 if (0 == ioasc)
3957 return;
3958
3959 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3960 return;
3961
3962 error_index = ipr_get_error(ioasc);
3963
3964 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3965 /* Don't log an error if the IOA already logged one */
3966 if (ioasa->ilid != 0)
3967 return;
3968
3969 if (ipr_error_table[error_index].log_ioasa == 0)
3970 return;
3971 }
3972
3973 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3974 ipr_error_table[error_index].error);
3975
3976 	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3977 	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3978 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3979 "Device End state: %s Phase: %s\n",
3980 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3981 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3982 }
3983
3984 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3985 data_len = sizeof(struct ipr_ioasa);
3986 else
3987 data_len = be16_to_cpu(ioasa->ret_stat_len);
3988
3989 ipr_err("IOASA Dump:\n");
3990
3991 for (i = 0; i < data_len / 4; i += 4) {
3992 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3993 be32_to_cpu(ioasa_data[i]),
3994 be32_to_cpu(ioasa_data[i+1]),
3995 be32_to_cpu(ioasa_data[i+2]),
3996 be32_to_cpu(ioasa_data[i+3]));
3997 }
3998}
3999
4000/**
4001 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4002 * @ipr_cmd: ipr command struct
4004 *
4005 * Return value:
4006 * none
4007 **/
4008static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4009{
4010 u32 failing_lba;
4011 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4012 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4013 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4014 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4015
4016 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4017
4018 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4019 return;
4020
4021 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4022
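	/*
	 * Volume sets reporting a failing LBA with nonzero upper 32 bits
	 * need descriptor format sense data (response code 0x72) to carry
	 * the full 64-bit LBA; all other cases use fixed format sense
	 * data (response code 0x70) below.
	 */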
4023 if (ipr_is_vset_device(res) &&
4024 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4025 ioasa->u.vset.failing_lba_hi != 0) {
4026 sense_buf[0] = 0x72;
4027 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4028 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4029 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4030
4031 sense_buf[7] = 12;
4032 sense_buf[8] = 0;
4033 sense_buf[9] = 0x0A;
4034 sense_buf[10] = 0x80;
4035
4036 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4037
4038 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4039 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4040 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4041 sense_buf[15] = failing_lba & 0x000000ff;
4042
4043 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4044
4045 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4046 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4047 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4048 sense_buf[19] = failing_lba & 0x000000ff;
4049 } else {
4050 sense_buf[0] = 0x70;
4051 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4052 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4053 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4054
4055 /* Illegal request */
4056 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4057 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4058 sense_buf[7] = 10; /* additional length */
4059
4060 /* IOARCB was in error */
4061 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4062 sense_buf[15] = 0xC0;
4063 else /* Parameter data was invalid */
4064 sense_buf[15] = 0x80;
4065
4066 sense_buf[16] =
4067 ((IPR_FIELD_POINTER_MASK &
4068 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4069 sense_buf[17] =
4070 (IPR_FIELD_POINTER_MASK &
4071 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4072 } else {
4073 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4074 if (ipr_is_vset_device(res))
4075 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4076 else
4077 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4078
4079 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4080 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4081 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4082 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4083 sense_buf[6] = failing_lba & 0x000000ff;
4084 }
4085
4086 sense_buf[7] = 6; /* additional length */
4087 }
4088 }
4089}
4090
4091/**
4092 * ipr_erp_start - Process an error response for a SCSI op
4093 * @ioa_cfg: ioa config struct
4094 * @ipr_cmd: ipr command struct
4095 *
4096 * This function determines whether or not to initiate ERP
4097 * on the affected device.
4098 *
4099 * Return value:
4100 * nothing
4101 **/
4102static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4103 struct ipr_cmnd *ipr_cmd)
4104{
4105 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4106 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4107 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4108
4109 if (!res) {
4110 ipr_scsi_eh_done(ipr_cmd);
4111 return;
4112 }
4113
4114 if (ipr_is_gscsi(res))
4115 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
4116 else
4117 ipr_gen_sense(ipr_cmd);
4118
4119 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4120 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4121 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4122 break;
4123 case IPR_IOASC_IR_RESOURCE_HANDLE:
4124 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4125 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4126 break;
4127 case IPR_IOASC_HW_SEL_TIMEOUT:
4128 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4129 res->needs_sync_complete = 1;
4130 break;
4131 case IPR_IOASC_SYNC_REQUIRED:
4132 if (!res->in_erp)
4133 res->needs_sync_complete = 1;
4134 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4135 break;
4136 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4137 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4138 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4139 break;
4140 case IPR_IOASC_BUS_WAS_RESET:
4141 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4142 /*
4143 * Report the bus reset and ask for a retry. The device
4144 * will give CC/UA the next command.
4145 */
4146 if (!res->resetting_device)
4147 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4148 scsi_cmd->result |= (DID_ERROR << 16);
4149 res->needs_sync_complete = 1;
4150 break;
4151 case IPR_IOASC_HW_DEV_BUS_STATUS:
4152 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4153 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4154 ipr_erp_cancel_all(ipr_cmd);
4155 return;
4156 }
4157 res->needs_sync_complete = 1;
4158 break;
4159 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4160 break;
4161 default:
4162 scsi_cmd->result |= (DID_ERROR << 16);
4163 if (!ipr_is_vset_device(res))
4164 res->needs_sync_complete = 1;
4165 break;
4166 }
4167
4168 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4169 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4170 scsi_cmd->scsi_done(scsi_cmd);
4171}
4172
4173/**
4174 * ipr_scsi_done - mid-layer done function
4175 * @ipr_cmd: ipr command struct
4176 *
4177 * This function is invoked by the interrupt handler for
4178 * ops generated by the SCSI mid-layer
4179 *
4180 * Return value:
4181 * none
4182 **/
4183static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4184{
4185 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4186 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4187 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4188
4189 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4190
4191 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4192 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4193 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4194 scsi_cmd->scsi_done(scsi_cmd);
4195 } else
4196 ipr_erp_start(ioa_cfg, ipr_cmd);
4197}
4198
4199/**
4200 * ipr_save_ioafp_mode_select - Save adapter's mode select data
4201 * @ioa_cfg: ioa config struct
4202 * @scsi_cmd: scsi command struct
4203 *
4204 * This function saves mode select data for the adapter to
4205 * use following an adapter reset.
4206 *
4207 * Return value:
4208 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4209 **/
4210static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4211 struct scsi_cmnd *scsi_cmd)
4212{
4213 if (!ioa_cfg->saved_mode_pages) {
4214 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
4215 GFP_ATOMIC);
4216 if (!ioa_cfg->saved_mode_pages) {
4217 dev_err(&ioa_cfg->pdev->dev,
4218 "IOA mode select buffer allocation failed\n");
4219 return SCSI_MLQUEUE_HOST_BUSY;
4220 }
4221 }
4222
4223 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4224 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4225 return 0;
4226}
4227
4228/**
4229 * ipr_queuecommand - Queue a mid-layer request
4230 * @scsi_cmd: scsi command struct
4231 * @done: done function
4232 *
4233 * This function queues a request generated by the mid-layer.
4234 *
4235 * Return value:
4236 * 0 on success
4237 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4238 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4239 **/
4240static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4241 void (*done) (struct scsi_cmnd *))
4242{
4243 struct ipr_ioa_cfg *ioa_cfg;
4244 struct ipr_resource_entry *res;
4245 struct ipr_ioarcb *ioarcb;
4246 struct ipr_cmnd *ipr_cmd;
4247 int rc = 0;
4248
4249 scsi_cmd->scsi_done = done;
4250 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4251 res = scsi_cmd->device->hostdata;
4252 scsi_cmd->result = (DID_OK << 16);
4253
4254 /*
4255 	 * We are currently blocking all devices due to a host reset.
4256 * We have told the host to stop giving us new requests, but
4257 * ERP ops don't count. FIXME
4258 */
4259 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4260 return SCSI_MLQUEUE_HOST_BUSY;
4261
4262 /*
4263 * FIXME - Create scsi_set_host_offline interface
4264 * and the ioa_is_dead check can be removed
4265 */
4266 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4267 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4268 scsi_cmd->result = (DID_NO_CONNECT << 16);
4269 scsi_cmd->scsi_done(scsi_cmd);
4270 return 0;
4271 }
4272
4273 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4274 ioarcb = &ipr_cmd->ioarcb;
4275 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4276
4277 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4278 ipr_cmd->scsi_cmd = scsi_cmd;
4279 ioarcb->res_handle = res->cfgte.res_handle;
4280 ipr_cmd->done = ipr_scsi_done;
4281 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4282
4283 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4284 if (scsi_cmd->underflow == 0)
4285 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4286
4287 if (res->needs_sync_complete) {
4288 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4289 res->needs_sync_complete = 0;
4290 }
4291
4292 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4293 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4294 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4295 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4296 }
4297
4298 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4299 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4300 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4301
4302 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4303 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4304
4305 if (likely(rc == 0))
4306 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4307
4308 if (likely(rc == 0)) {
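		/*
		 * The memory barrier orders the IOARCB/IOADL stores ahead of
		 * the IOARRIN doorbell write below, so the adapter cannot
		 * fetch a partially built request.
		 */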
4309 mb();
4310 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4311 ioa_cfg->regs.ioarrin_reg);
4312 } else {
4313 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4314 return SCSI_MLQUEUE_HOST_BUSY;
4315 }
4316
4317 return 0;
4318}
4319
4320/**
4321 * ipr_ioa_info - Get information about the card/driver
4322 * @host: scsi host struct
4323 *
4324 * Return value:
4325 * pointer to buffer with description string
4326 **/
4327static const char * ipr_ioa_info(struct Scsi_Host *host)
4328{
4329 static char buffer[512];
4330 struct ipr_ioa_cfg *ioa_cfg;
4331 unsigned long lock_flags = 0;
4332
4333 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4334
4335 spin_lock_irqsave(host->host_lock, lock_flags);
4336 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4337 spin_unlock_irqrestore(host->host_lock, lock_flags);
4338
4339 return buffer;
4340}
4341
4342static struct scsi_host_template driver_template = {
4343 .module = THIS_MODULE,
4344 .name = "IPR",
4345 .info = ipr_ioa_info,
4346 .queuecommand = ipr_queuecommand,
4347 .eh_abort_handler = ipr_eh_abort,
4348 .eh_device_reset_handler = ipr_eh_dev_reset,
4349 .eh_host_reset_handler = ipr_eh_host_reset,
4350 .slave_alloc = ipr_slave_alloc,
4351 .slave_configure = ipr_slave_configure,
4352 .slave_destroy = ipr_slave_destroy,
4353 .change_queue_depth = ipr_change_queue_depth,
4354 .change_queue_type = ipr_change_queue_type,
4355 .bios_param = ipr_biosparam,
4356 .can_queue = IPR_MAX_COMMANDS,
4357 .this_id = -1,
4358 .sg_tablesize = IPR_MAX_SGLIST,
4359 .max_sectors = IPR_IOA_MAX_SECTORS,
4360 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4361 .use_clustering = ENABLE_CLUSTERING,
4362 .shost_attrs = ipr_ioa_attrs,
4363 .sdev_attrs = ipr_dev_attrs,
4364 .proc_name = IPR_NAME
4365};
4366
4367#ifdef CONFIG_PPC_PSERIES
4368static const u16 ipr_blocked_processors[] = {
4369 PV_NORTHSTAR,
4370 PV_PULSAR,
4371 PV_POWER4,
4372 PV_ICESTAR,
4373 PV_SSTAR,
4374 PV_POWER4p,
4375 PV_630,
4376 PV_630p
4377};
4378
4379/**
4380 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4381 * @ioa_cfg: ioa cfg struct
4382 *
4383 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4384 * certain pSeries hardware. This function determines if the given
4385 * adapter is in one of these configurations or not.
4386 *
4387 * Return value:
4388 * 1 if adapter is not supported / 0 if adapter is supported
4389 **/
4390static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4391{
4392 u8 rev_id;
4393 int i;
4394
4395 if (ioa_cfg->type == 0x5702) {
4396 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4397 &rev_id) == PCIBIOS_SUCCESSFUL) {
4398 if (rev_id < 4) {
4399 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4400 if (__is_processor(ipr_blocked_processors[i]))
4401 return 1;
4402 }
4403 }
4404 }
4405 }
4406 return 0;
4407}
4408#else
4409#define ipr_invalid_adapter(ioa_cfg) 0
4410#endif
4411
4412/**
4413 * ipr_ioa_bringdown_done - IOA bring down completion.
4414 * @ipr_cmd: ipr command struct
4415 *
4416 * This function processes the completion of an adapter bring down.
4417 * It wakes any reset sleepers.
4418 *
4419 * Return value:
4420 * IPR_RC_JOB_RETURN
4421 **/
4422static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4423{
4424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4425
4426 ENTER;
4427 ioa_cfg->in_reset_reload = 0;
4428 ioa_cfg->reset_retries = 0;
4429 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4430 wake_up_all(&ioa_cfg->reset_wait_q);
4431
4432 spin_unlock_irq(ioa_cfg->host->host_lock);
4433 scsi_unblock_requests(ioa_cfg->host);
4434 spin_lock_irq(ioa_cfg->host->host_lock);
4435 LEAVE;
4436
4437 return IPR_RC_JOB_RETURN;
4438}
4439
4440/**
4441 * ipr_ioa_reset_done - IOA reset completion.
4442 * @ipr_cmd: ipr command struct
4443 *
4444 * This function processes the completion of an adapter reset.
4445 * It schedules any necessary mid-layer add/removes and
4446 * wakes any reset sleepers.
4447 *
4448 * Return value:
4449 * IPR_RC_JOB_RETURN
4450 **/
4451static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4452{
4453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4454 struct ipr_resource_entry *res;
4455 struct ipr_hostrcb *hostrcb, *temp;
4456 int i = 0;
4457
4458 ENTER;
4459 ioa_cfg->in_reset_reload = 0;
4460 ioa_cfg->allow_cmds = 1;
4461 ioa_cfg->reset_cmd = NULL;
4462 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4463
4464 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4465 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4466 ipr_trace;
4467 break;
4468 }
4469 }
4470 schedule_work(&ioa_cfg->work_q);
4471
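	/*
	 * Repost the free host RCBs to the adapter: the first
	 * IPR_NUM_LOG_HCAMS go out as error log HCAMs, the remainder as
	 * configuration change HCAMs.
	 */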
4472 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4473 list_del(&hostrcb->queue);
4474 if (i++ < IPR_NUM_LOG_HCAMS)
4475 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4476 else
4477 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4478 }
4479
4480 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4481
4482 ioa_cfg->reset_retries = 0;
4483 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4484 wake_up_all(&ioa_cfg->reset_wait_q);
4485
4486 spin_unlock_irq(ioa_cfg->host->host_lock);
4487 scsi_unblock_requests(ioa_cfg->host);
4488 spin_lock_irq(ioa_cfg->host->host_lock);
4489
4490 if (!ioa_cfg->allow_cmds)
4491 scsi_block_requests(ioa_cfg->host);
4492
4493 LEAVE;
4494 return IPR_RC_JOB_RETURN;
4495}
4496
4497/**
4498 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4499 * @supported_dev: supported device struct
4500 * @vpids: vendor product id struct
4501 *
4502 * Return value:
4503 * none
4504 **/
4505static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4506 struct ipr_std_inq_vpids *vpids)
4507{
4508 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4509 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4510 supported_dev->num_records = 1;
4511 supported_dev->data_length =
4512 cpu_to_be16(sizeof(struct ipr_supported_device));
4513 supported_dev->reserved = 0;
4514}
4515
4516/**
4517 * ipr_set_supported_devs - Send Set Supported Devices for a device
4518 * @ipr_cmd: ipr command struct
4519 *
4520 * This function sends a Set Supported Devices to the adapter
4521 *
4522 * Return value:
4523 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4524 **/
4525static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4526{
4527 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4528 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4529 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4530 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4531 struct ipr_resource_entry *res = ipr_cmd->u.res;
4532
4533 ipr_cmd->job_step = ipr_ioa_reset_done;
4534
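	/*
	 * One Set Supported Devices command is issued per DASD resource.
	 * job_step is pointed back at this function before returning, so
	 * the reset job re-enters here and list_for_each_entry_continue
	 * resumes with the next resource until the list is exhausted.
	 */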
4535 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4536 		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4537 continue;
4538
4539 ipr_cmd->u.res = res;
4540 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4541
4542 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4543 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4544 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4545
4546 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4547 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4548 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4549
4550 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4551 sizeof(struct ipr_supported_device));
4552 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4553 offsetof(struct ipr_misc_cbs, supp_dev));
4554 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4555 ioarcb->write_data_transfer_length =
4556 cpu_to_be32(sizeof(struct ipr_supported_device));
4557
4558 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4559 IPR_SET_SUP_DEVICE_TIMEOUT);
4560
4561 ipr_cmd->job_step = ipr_set_supported_devs;
4562 return IPR_RC_JOB_RETURN;
4563 }
4564
4565 return IPR_RC_JOB_CONTINUE;
4566}
4567
4568/**
4569 * ipr_setup_write_cache - Disable write cache if needed
4570 * @ipr_cmd: ipr command struct
4571 *
4572 * This function sets up the adapter's write cache to the desired setting
4573 *
4574 * Return value:
4575 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4576 **/
4577static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4578{
4579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4580
4581 ipr_cmd->job_step = ipr_set_supported_devs;
4582 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4583 struct ipr_resource_entry, queue);
4584
4585 if (ioa_cfg->cache_state != CACHE_DISABLED)
4586 return IPR_RC_JOB_CONTINUE;
4587
4588 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4589 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4590 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4591 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4592
4593 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4594
4595 return IPR_RC_JOB_RETURN;
4596}
4597
4598/**
4599 * ipr_get_mode_page - Locate specified mode page
4600 * @mode_pages: mode page buffer
4601 * @page_code: page code to find
4602 * @len: minimum required length for mode page
4603 *
4604 * Return value:
4605 * pointer to mode page / NULL on failure
4606 **/
4607static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4608 u32 page_code, u32 len)
4609{
4610 struct ipr_mode_page_hdr *mode_hdr;
4611 u32 page_length;
4612 u32 length;
4613
4614 if (!mode_pages || (mode_pages->hdr.length == 0))
4615 return NULL;
4616
4617 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4618 mode_hdr = (struct ipr_mode_page_hdr *)
4619 (mode_pages->data + mode_pages->hdr.block_desc_len);
4620
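	/*
	 * Walk the mode page headers that follow the block descriptors;
	 * each header gives the page code and the page body length used
	 * to hop to the next header.
	 */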
4621 while (length) {
4622 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4623 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4624 return mode_hdr;
4625 break;
4626 } else {
4627 page_length = (sizeof(struct ipr_mode_page_hdr) +
4628 mode_hdr->page_length);
4629 length -= page_length;
4630 mode_hdr = (struct ipr_mode_page_hdr *)
4631 ((unsigned long)mode_hdr + page_length);
4632 }
4633 }
4634 return NULL;
4635}
4636
4637/**
4638 * ipr_check_term_power - Check for term power errors
4639 * @ioa_cfg: ioa config struct
4640 * @mode_pages: IOAFP mode pages buffer
4641 *
4642 * Check the IOAFP's mode page 28 for term power errors
4643 *
4644 * Return value:
4645 * nothing
4646 **/
4647static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4648 struct ipr_mode_pages *mode_pages)
4649{
4650 int i;
4651 int entry_length;
4652 struct ipr_dev_bus_entry *bus;
4653 struct ipr_mode_page28 *mode_page;
4654
4655 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4656 sizeof(struct ipr_mode_page28));
4657
4658 entry_length = mode_page->entry_length;
4659
4660 bus = mode_page->bus;
4661
4662 for (i = 0; i < mode_page->num_entries; i++) {
4663 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4664 dev_err(&ioa_cfg->pdev->dev,
4665 "Term power is absent on scsi bus %d\n",
4666 bus->res_addr.bus);
4667 }
4668
4669 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4670 }
4671}
4672
4673/**
4674 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4675 * @ioa_cfg: ioa config struct
4676 *
4677 * Looks through the config table checking for SES devices. If
4678 * an SES device appears in the SES table with a maximum SCSI bus
4679 * speed, the bus speed is limited accordingly.
4680 *
4681 * Return value:
4682 * none
4683 **/
4684static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4685{
4686 u32 max_xfer_rate;
4687 int i;
4688
4689 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4690 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4691 ioa_cfg->bus_attr[i].bus_width);
4692
4693 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4694 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4695 }
4696}
4697
4698/**
4699 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4700 * @ioa_cfg: ioa config struct
4701 * @mode_pages: mode page 28 buffer
4702 *
4703 * Updates mode page 28 based on driver configuration
4704 *
4705 * Return value:
4706 * none
4707 **/
4708static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4709 struct ipr_mode_pages *mode_pages)
4710{
4711 int i, entry_length;
4712 struct ipr_dev_bus_entry *bus;
4713 struct ipr_bus_attributes *bus_attr;
4714 struct ipr_mode_page28 *mode_page;
4715
4716 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4717 sizeof(struct ipr_mode_page28));
4718
4719 entry_length = mode_page->entry_length;
4720
4721 /* Loop for each device bus entry */
4722 for (i = 0, bus = mode_page->bus;
4723 i < mode_page->num_entries;
4724 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4725 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4726 dev_err(&ioa_cfg->pdev->dev,
4727 "Invalid resource address reported: 0x%08X\n",
4728 IPR_GET_PHYS_LOC(bus->res_addr));
4729 continue;
4730 }
4731
4732 bus_attr = &ioa_cfg->bus_attr[i];
4733 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4734 bus->bus_width = bus_attr->bus_width;
4735 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4736 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4737 if (bus_attr->qas_enabled)
4738 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4739 else
4740 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4741 }
4742}
4743
4744/**
4745 * ipr_build_mode_select - Build a mode select command
4746 * @ipr_cmd: ipr command struct
4747 * @res_handle: resource handle to send command to
4748 * @parm: Byte 2 of the MODE SELECT command
4749 * @dma_addr: DMA buffer address
4750 * @xfer_len: data transfer length
4751 *
4752 * Return value:
4753 * none
4754 **/
4755static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4756 __be32 res_handle, u8 parm, u32 dma_addr,
4757 u8 xfer_len)
4758{
4759 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4760 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4761
4762 ioarcb->res_handle = res_handle;
4763 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4764 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4765 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4766 ioarcb->cmd_pkt.cdb[1] = parm;
4767 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4768
4769 ioadl->flags_and_data_len =
4770 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4771 ioadl->address = cpu_to_be32(dma_addr);
4772 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4773 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4774}
4775
4776/**
4777 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4778 * @ipr_cmd: ipr command struct
4779 *
4780 * This function sets up the SCSI bus attributes and sends
4781 * a Mode Select for Page 28 to activate them.
4782 *
4783 * Return value:
4784 * IPR_RC_JOB_RETURN
4785 **/
4786static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4787{
4788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4789 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4790 int length;
4791
4792 ENTER;
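	/*
	 * If the host issued a MODE SELECT that we saved away, replay it
	 * verbatim; otherwise build page 28 from the driver's SCSI bus
	 * attributes.
	 */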
4793 if (ioa_cfg->saved_mode_pages) {
4794 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4795 ioa_cfg->saved_mode_page_len);
4796 length = ioa_cfg->saved_mode_page_len;
4797 } else {
4798 ipr_scsi_bus_speed_limit(ioa_cfg);
4799 ipr_check_term_power(ioa_cfg, mode_pages);
4800 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4801 length = mode_pages->hdr.length + 1;
4802 mode_pages->hdr.length = 0;
4803 }
4804
4805 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4806 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4807 length);
4808
4809 	ipr_cmd->job_step = ipr_setup_write_cache;
4810 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4811
4812 LEAVE;
4813 return IPR_RC_JOB_RETURN;
4814}
4815
4816/**
4817 * ipr_build_mode_sense - Builds a mode sense command
4818 * @ipr_cmd: ipr command struct
4819 * @res_handle: resource handle to send command to
4820 * @parm: Byte 2 of mode sense command
4821 * @dma_addr: DMA address of mode sense buffer
4822 * @xfer_len: Size of DMA buffer
4823 *
4824 * Return value:
4825 * none
4826 **/
4827static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4828 __be32 res_handle,
4829 u8 parm, u32 dma_addr, u8 xfer_len)
4830{
4831 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4832 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4833
4834 ioarcb->res_handle = res_handle;
4835 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4836 ioarcb->cmd_pkt.cdb[2] = parm;
4837 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4838 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4839
4840 ioadl->flags_and_data_len =
4841 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4842 ioadl->address = cpu_to_be32(dma_addr);
4843 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4844 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4845}
4846
4847/**
4848 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4849 * @ipr_cmd: ipr command struct
4850 *
4851 * This function sends a Page 28 mode sense to the IOA to
4852 * retrieve SCSI bus attributes.
4853 *
4854 * Return value:
4855 * IPR_RC_JOB_RETURN
4856 **/
4857static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4858{
4859 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4860
4861 ENTER;
4862 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4863 0x28, ioa_cfg->vpd_cbs_dma +
4864 offsetof(struct ipr_misc_cbs, mode_pages),
4865 sizeof(struct ipr_mode_pages));
4866
4867 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4868
4869 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4870
4871 LEAVE;
4872 return IPR_RC_JOB_RETURN;
4873}
4874
4875/**
4876 * ipr_init_res_table - Initialize the resource table
4877 * @ipr_cmd: ipr command struct
4878 *
4879 * This function looks through the existing resource table, comparing
4880 * it with the config table. It will take care of old/new
4881 * devices and schedule adding/removing them from the mid-layer
4882 * as appropriate.
4883 *
4884 * Return value:
4885 * IPR_RC_JOB_CONTINUE
4886 **/
4887static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4888{
4889 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4890 struct ipr_resource_entry *res, *temp;
4891 struct ipr_config_table_entry *cfgte;
4892 int found, i;
4893 LIST_HEAD(old_res);
4894
4895 ENTER;
4896 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4897 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4898
4899 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4900 list_move_tail(&res->queue, &old_res);
4901
4902 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4903 cfgte = &ioa_cfg->cfg_table->dev[i];
4904 found = 0;
4905
4906 list_for_each_entry_safe(res, temp, &old_res, queue) {
4907 if (!memcmp(&res->cfgte.res_addr,
4908 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4909 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4910 found = 1;
4911 break;
4912 }
4913 }
4914
4915 if (!found) {
4916 if (list_empty(&ioa_cfg->free_res_q)) {
4917 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4918 break;
4919 }
4920
4921 found = 1;
4922 res = list_entry(ioa_cfg->free_res_q.next,
4923 struct ipr_resource_entry, queue);
4924 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4925 ipr_init_res_entry(res);
4926 res->add_to_ml = 1;
4927 }
4928
4929 if (found)
4930 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4931 }
4932
4933 list_for_each_entry_safe(res, temp, &old_res, queue) {
4934 if (res->sdev) {
4935 res->del_from_ml = 1;
4936 res->sdev->hostdata = NULL;
4937 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4938 } else {
4939 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4940 }
4941 }
4942
4943 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4944
4945 LEAVE;
4946 return IPR_RC_JOB_CONTINUE;
4947}
4948
4949/**
4950 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4951 * @ipr_cmd: ipr command struct
4952 *
4953 * This function sends a Query IOA Configuration command
4954 * to the adapter to retrieve the IOA configuration table.
4955 *
4956 * Return value:
4957 * IPR_RC_JOB_RETURN
4958 **/
4959static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4960{
4961 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4962 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4963 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4964 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4965
4966 ENTER;
4967 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4968 ucode_vpd->major_release, ucode_vpd->card_type,
4969 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4970 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4971 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4972
4973 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4974 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4975 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4976
4977 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4978 ioarcb->read_data_transfer_length =
4979 cpu_to_be32(sizeof(struct ipr_config_table));
4980
4981 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4982 ioadl->flags_and_data_len =
4983 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4984
4985 ipr_cmd->job_step = ipr_init_res_table;
4986
4987 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4988
4989 LEAVE;
4990 return IPR_RC_JOB_RETURN;
4991}
4992
4993/**
4994 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4995 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1)
 * @page: inquiry page code (CDB byte 2)
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: transfer length
4996 *
4997 * This utility function sends an inquiry to the adapter.
4998 *
4999 * Return value:
5000 * none
5001 **/
5002static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5003 u32 dma_addr, u8 xfer_len)
5004{
5005 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5006 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5007
5008 ENTER;
5009 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5010 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5011
5012 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5013 ioarcb->cmd_pkt.cdb[1] = flags;
5014 ioarcb->cmd_pkt.cdb[2] = page;
5015 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5016
5017 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5018 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5019
5020 ioadl->address = cpu_to_be32(dma_addr);
5021 ioadl->flags_and_data_len =
5022 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5023
5024 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5025 LEAVE;
5026}
5027
5028/**
5029 * ipr_inquiry_page_supported - Is the given inquiry page supported
5030 * @page0: inquiry page 0 buffer
5031 * @page: page code.
5032 *
5033 * This function determines if the specified inquiry page is supported.
5034 *
5035 * Return value:
5036 * 1 if page is supported / 0 if not
5037 **/
5038static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5039{
5040 int i;
5041
5042 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5043 if (page0->page[i] == page)
5044 return 1;
5045
5046 return 0;
5047}
5048
5049/**
5050 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5051 * @ipr_cmd: ipr command struct
5052 *
5053 * This function sends a Page 3 inquiry to the adapter
5054 * to retrieve software VPD information.
5055 *
5056 * Return value:
5057 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5058 **/
5059static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5060{
5061 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5062 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5063
5064 ENTER;
5065
5066 if (!ipr_inquiry_page_supported(page0, 1))
5067 ioa_cfg->cache_state = CACHE_NONE;
5068
5069 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5070
5071 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5072 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5073 sizeof(struct ipr_inquiry_page3));
5074
5075 LEAVE;
5076 return IPR_RC_JOB_RETURN;
5077}
5078
5079/**
5080 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5081 * @ipr_cmd: ipr command struct
5082 *
5083 * This function sends a Page 0 inquiry to the adapter
5084 * to retrieve supported inquiry pages.
5085 *
5086 * Return value:
5087 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5088 **/
5089static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5090{
5091 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5092 char type[5];
5093
5094 ENTER;
5095
5096 /* Grab the type out of the VPD and store it away */
5097 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5098 type[4] = '\0';
5099 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5100
5101 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5102
5103 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5104 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5105 sizeof(struct ipr_inquiry_page0));
5106
5107 LEAVE;
5108 return IPR_RC_JOB_RETURN;
5109}
5110
5111/**
5112 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5113 * @ipr_cmd: ipr command struct
5114 *
5115 * This function sends a standard inquiry to the adapter.
5116 *
5117 * Return value:
5118 * IPR_RC_JOB_RETURN
5119 **/
5120static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5121{
5122 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5123
5124 ENTER;
5125 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5126
5127 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5128 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5129 sizeof(struct ipr_ioa_vpd));
5130
5131 LEAVE;
5132 return IPR_RC_JOB_RETURN;
5133}
5134
5135/**
5136 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5137 * @ipr_cmd: ipr command struct
5138 *
5139 * This function sends an Identify Host Request Response Queue
5140 * command to establish the HRRQ with the adapter.
5141 *
5142 * Return value:
5143 * IPR_RC_JOB_RETURN
5144 **/
5145static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5146{
5147 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5148 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5149
5150 ENTER;
5151 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5152
5153 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5154 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5155
5156 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
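	/*
	 * The HRRQ DMA address is encoded big-endian into CDB bytes 2-5
	 * and the queue length in bytes into CDB bytes 7-8, one 32-bit
	 * entry per command block.
	 */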
5157 ioarcb->cmd_pkt.cdb[2] =
5158 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5159 ioarcb->cmd_pkt.cdb[3] =
5160 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5161 ioarcb->cmd_pkt.cdb[4] =
5162 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5163 ioarcb->cmd_pkt.cdb[5] =
5164 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5165 ioarcb->cmd_pkt.cdb[7] =
5166 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5167 ioarcb->cmd_pkt.cdb[8] =
5168 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5169
5170 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5171
5172 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5173
5174 LEAVE;
5175 return IPR_RC_JOB_RETURN;
5176}
5177
5178/**
5179 * ipr_reset_timer_done - Adapter reset timer function
5180 * @ipr_cmd: ipr command struct
5181 *
5182 * Description: This function is used in adapter reset processing
5183 * for timing events. If the reset_cmd pointer in the IOA
5184 * config struct is not this adapter's, we are doing nested
5185 * resets and fail_all_ops will take care of freeing the
5186 * command block.
5187 *
5188 * Return value:
5189 * none
5190 **/
5191static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5192{
5193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5194 unsigned long lock_flags = 0;
5195
5196 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5197
5198 if (ioa_cfg->reset_cmd == ipr_cmd) {
5199 list_del(&ipr_cmd->queue);
5200 ipr_cmd->done(ipr_cmd);
5201 }
5202
5203 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5204}
5205
5206/**
5207 * ipr_reset_start_timer - Start a timer for adapter reset job
5208 * @ipr_cmd: ipr command struct
5209 * @timeout: timeout value
5210 *
5211 * Description: This function is used in adapter reset processing
5212 * for timing events. If the reset_cmd pointer in the IOA
5213 * config struct is not this adapter's, we are doing nested
5214 * resets and fail_all_ops will take care of freeing the
5215 * command block.
5216 *
5217 * Return value:
5218 * none
5219 **/
5220static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5221 unsigned long timeout)
5222{
5223 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5224 ipr_cmd->done = ipr_reset_ioa_job;
5225
5226 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5227 ipr_cmd->timer.expires = jiffies + timeout;
5228 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5229 add_timer(&ipr_cmd->timer);
5230}
5231
5232/**
5233 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5234 * @ioa_cfg: ioa cfg struct
5235 *
5236 * Return value:
5237 * nothing
5238 **/
5239static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5240{
5241 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5242
5243 /* Initialize Host RRQ pointers */
5244 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5245 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5246 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5247 ioa_cfg->toggle_bit = 1;
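	/*
	 * toggle_bit starts at 1 to match the first pass the adapter will
	 * make through the freshly zeroed HRRQ; ipr_isr() tests the same
	 * bit when consuming entries.
	 */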
5248
5249 /* Zero out config table */
5250 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5251}
5252
5253/**
5254 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5255 * @ipr_cmd: ipr command struct
5256 *
5257 * This function reinitializes some control blocks and
5258 * enables destructive diagnostics on the adapter.
5259 *
5260 * Return value:
5261 * IPR_RC_JOB_RETURN
5262 **/
5263static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5264{
5265 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5266 volatile u32 int_reg;
5267
5268 ENTER;
5269 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5270 ipr_init_ioa_mem(ioa_cfg);
5271
5272 ioa_cfg->allow_interrupts = 1;
5273 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5274
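	/*
	 * If the adapter has already signalled its transition to
	 * operational state, just unmask the error and HRRQ interrupts
	 * and let the reset job continue inline instead of arming the
	 * operational timer.
	 */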
5275 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5276 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5277 ioa_cfg->regs.clr_interrupt_mask_reg);
5278 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5279 return IPR_RC_JOB_CONTINUE;
5280 }
5281
5282 /* Enable destructive diagnostics on IOA */
5283 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5284
5285 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5286 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5287
5288 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5289
5290 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5291 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5292 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5293 ipr_cmd->done = ipr_reset_ioa_job;
5294 add_timer(&ipr_cmd->timer);
5295 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5296
5297 LEAVE;
5298 return IPR_RC_JOB_RETURN;
5299}
5300
5301/**
5302 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5303 * @ipr_cmd: ipr command struct
5304 *
5305 * This function is invoked when an adapter dump has run out
5306 * of processing time.
5307 *
5308 * Return value:
5309 * IPR_RC_JOB_CONTINUE
5310 **/
5311static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5312{
5313 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5314
5315 if (ioa_cfg->sdt_state == GET_DUMP)
5316 ioa_cfg->sdt_state = ABORT_DUMP;
5317
5318 ipr_cmd->job_step = ipr_reset_alert;
5319
5320 return IPR_RC_JOB_CONTINUE;
5321}
5322
5323/**
5324 * ipr_unit_check_no_data - Log a unit check/no data error log
5325 * @ioa_cfg: ioa config struct
5326 *
5327 * Logs an error indicating the adapter unit checked, but for some
5328 * reason, we were unable to fetch the unit check buffer.
5329 *
5330 * Return value:
5331 * nothing
5332 **/
5333static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5334{
5335 ioa_cfg->errors_logged++;
5336 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5337}
5338
5339/**
5340 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5341 * @ioa_cfg: ioa config struct
5342 *
5343 * Fetches the unit check buffer from the adapter by clocking the data
5344 * through the mailbox register.
5345 *
5346 * Return value:
5347 * nothing
5348 **/
5349static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5350{
5351 unsigned long mailbox;
5352 struct ipr_hostrcb *hostrcb;
5353 struct ipr_uc_sdt sdt;
5354 int rc, length;
5355
5356 mailbox = readl(ioa_cfg->ioa_mailbox);
5357
5358 if (!ipr_sdt_is_fmt2(mailbox)) {
5359 ipr_unit_check_no_data(ioa_cfg);
5360 return;
5361 }
5362
5363 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5364 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5365 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5366
5367 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5368 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5369 ipr_unit_check_no_data(ioa_cfg);
5370 return;
5371 }
5372
5373 /* Find length of the first sdt entry (UC buffer) */
5374 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5375 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5376
5377 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5378 struct ipr_hostrcb, queue);
5379 list_del(&hostrcb->queue);
5380 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5381
5382 rc = ipr_get_ldump_data_section(ioa_cfg,
5383 be32_to_cpu(sdt.entry[0].bar_str_offset),
5384 (__be32 *)&hostrcb->hcam,
5385 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5386
5387 if (!rc)
5388 ipr_handle_log_data(ioa_cfg, hostrcb);
5389 else
5390 ipr_unit_check_no_data(ioa_cfg);
5391
5392 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5393}
5394
5395/**
5396 * ipr_reset_restore_cfg_space - Restore PCI config space.
5397 * @ipr_cmd: ipr command struct
5398 *
5399 * Description: This function restores the saved PCI config space of
5400 * the adapter, fails all outstanding ops back to the callers, and
5401 * fetches the dump/unit check if applicable to this reset.
5402 *
5403 * Return value:
5404 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5405 **/
5406static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5407{
5408 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5409 int rc;
5410
5411 ENTER;
5412 	pci_unblock_user_cfg_access(ioa_cfg->pdev);
5413 rc = pci_restore_state(ioa_cfg->pdev);
5414
5415 if (rc != PCIBIOS_SUCCESSFUL) {
5416 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5417 return IPR_RC_JOB_CONTINUE;
5418 }
5419
5420 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5421 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5422 return IPR_RC_JOB_CONTINUE;
5423 }
5424
5425 ipr_fail_all_ops(ioa_cfg);
5426
5427 if (ioa_cfg->ioa_unit_checked) {
5428 ioa_cfg->ioa_unit_checked = 0;
5429 ipr_get_unit_check_buffer(ioa_cfg);
5430 ipr_cmd->job_step = ipr_reset_alert;
5431 ipr_reset_start_timer(ipr_cmd, 0);
5432 return IPR_RC_JOB_RETURN;
5433 }
5434
5435 if (ioa_cfg->in_ioa_bringdown) {
5436 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5437 } else {
5438 ipr_cmd->job_step = ipr_reset_enable_ioa;
5439
5440 if (GET_DUMP == ioa_cfg->sdt_state) {
5441 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5442 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5443 schedule_work(&ioa_cfg->work_q);
5444 return IPR_RC_JOB_RETURN;
5445 }
5446 }
5447
5448 	LEAVE;
5449 return IPR_RC_JOB_CONTINUE;
5450}
5451
5452/**
5453 * ipr_reset_start_bist - Run BIST on the adapter.
5454 * @ipr_cmd: ipr command struct
5455 *
5456 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5457 *
5458 * Return value:
5459 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5460 **/
5461static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5462{
5463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5464 int rc;
5465
5466 ENTER;
b30197d2 5467 pci_block_user_cfg_access(ioa_cfg->pdev);
5468 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5469
5470 if (rc != PCIBIOS_SUCCESSFUL) {
5471 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5472 rc = IPR_RC_JOB_CONTINUE;
5473 } else {
5474 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5475 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5476 rc = IPR_RC_JOB_RETURN;
5477 }
5478
5479 LEAVE;
5480 return rc;
5481}
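
/*
 * Illustrative sketch only, based on generic PCI behavior rather than
 * anything this driver does: a device that supports BIST clears
 * PCI_BIST_START when the self test completes and leaves a completion
 * code in the low nibble (zero on success). The driver above simply
 * waits a fixed IPR_WAIT_FOR_BIST_TIMEOUT instead of polling, but a
 * hypothetical completion check could look like:
 */
static int ipr_sketch_bist_done(struct pci_dev *pdev)
{
	u8 bist;

	if (pci_read_config_byte(pdev, PCI_BIST, &bist) != PCIBIOS_SUCCESSFUL)
		return -EIO;

	if (bist & PCI_BIST_START)
		return 0;	/* BIST still running */

	return (bist & PCI_BIST_CODE_MASK) ? -EIO : 1;	/* 1 == passed */
}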
5482
5483/**
5484 * ipr_reset_allowed - Query whether or not IOA can be reset
5485 * @ioa_cfg: ioa config struct
5486 *
5487 * Return value:
5488 * 0 if reset not allowed / non-zero if reset is allowed
5489 **/
5490static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5491{
5492 volatile u32 temp_reg;
5493
5494 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5495 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5496}
5497
5498/**
5499 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5500 * @ipr_cmd: ipr command struct
5501 *
5502 * Description: This function waits for adapter permission to run BIST,
5503 * then runs BIST. If the adapter does not give permission after a
5504 * reasonable time, we will reset the adapter anyway. Resetting the
5505 * adapter without warning it risks losing the persistent error log
5506 * on the adapter: if the adapter is reset while it is writing to
5507 * its flash, the affected flash segment will have bad ECC and be
5508 * zeroed.
5509 *
5510 * Return value:
5511 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5512 **/
5513static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5514{
5515 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5516 int rc = IPR_RC_JOB_RETURN;
5517
5518 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5519 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5520 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5521 } else {
5522 ipr_cmd->job_step = ipr_reset_start_bist;
5523 rc = IPR_RC_JOB_CONTINUE;
5524 }
5525
5526 return rc;
5527}
5528
5529/**
5530 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5531 * @ipr_cmd: ipr command struct
5532 *
5533 * Description: This function alerts the adapter that it will be reset.
5534 * If memory space is not currently enabled, proceed directly
5535 * to running BIST on the adapter. The timer must always be started
5536 * so we guarantee we do not run BIST from ipr_isr.
5537 *
5538 * Return value:
5539 * IPR_RC_JOB_RETURN
5540 **/
5541static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5542{
5543 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5544 u16 cmd_reg;
5545 int rc;
5546
5547 ENTER;
5548 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5549
5550 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5551 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5552 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5553 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5554 } else {
5555 ipr_cmd->job_step = ipr_reset_start_bist;
5556 }
5557
5558 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5559 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5560
5561 LEAVE;
5562 return IPR_RC_JOB_RETURN;
5563}
5564
5565/**
5566 * ipr_reset_ucode_download_done - Microcode download completion
5567 * @ipr_cmd: ipr command struct
5568 *
5569 * Description: This function unmaps the microcode download buffer.
5570 *
5571 * Return value:
5572 * IPR_RC_JOB_CONTINUE
5573 **/
5574static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5575{
5576 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5577 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5578
5579 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5580 sglist->num_sg, DMA_TO_DEVICE);
5581
5582 ipr_cmd->job_step = ipr_reset_alert;
5583 return IPR_RC_JOB_CONTINUE;
5584}
5585
5586/**
5587 * ipr_reset_ucode_download - Download microcode to the adapter
5588 * @ipr_cmd: ipr command struct
5589 *
5590 * Description: This function checks to see if there is microcode
5591 * to download to the adapter. If there is, a download is performed.
5592 *
5593 * Return value:
5594 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5595 **/
5596static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5597{
5598 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5599 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5600
5601 ENTER;
5602 ipr_cmd->job_step = ipr_reset_alert;
5603
5604 if (!sglist)
5605 return IPR_RC_JOB_CONTINUE;
5606
5607 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5608 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5609 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5610 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5611 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5612 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5613 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5614
12baa420 5615 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5616 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5617
5618 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5619 IPR_WRITE_BUFFER_TIMEOUT);
5620
5621 LEAVE;
5622 return IPR_RC_JOB_RETURN;
5623}
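
/*
 * Illustrative sketch only: the WRITE BUFFER CDB built above carries
 * the transfer length as a 24-bit big-endian value in bytes 6-8. A
 * hypothetical helper encoding an arbitrary length the same way:
 */
static inline void ipr_sketch_set_cdb_len24(u8 *cdb, u32 len)
{
	/* Byte 6 holds bits 23:16, byte 7 bits 15:8, byte 8 bits 7:0 */
	cdb[6] = (len >> 16) & 0xff;
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;
}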
5624
5625/**
5626 * ipr_reset_shutdown_ioa - Shutdown the adapter
5627 * @ipr_cmd: ipr command struct
5628 *
5629 * Description: This function issues an adapter shutdown of the
5630 * specified type to the specified adapter as part of the
5631 * adapter reset job.
5632 *
5633 * Return value:
5634 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5635 **/
5636static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5637{
5638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5639 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5640 unsigned long timeout;
5641 int rc = IPR_RC_JOB_CONTINUE;
5642
5643 ENTER;
5644 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5645 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5646 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5647 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5648 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5649
5650 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5651 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5652 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5653 timeout = IPR_INTERNAL_TIMEOUT;
5654 else
5655 timeout = IPR_SHUTDOWN_TIMEOUT;
5656
5657 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5658
5659 rc = IPR_RC_JOB_RETURN;
5660 ipr_cmd->job_step = ipr_reset_ucode_download;
5661 } else
5662 ipr_cmd->job_step = ipr_reset_alert;
5663
5664 LEAVE;
5665 return rc;
5666}
5667
5668/**
5669 * ipr_reset_ioa_job - Adapter reset job
5670 * @ipr_cmd: ipr command struct
5671 *
5672 * Description: This function is the job router for the adapter reset job.
5673 *
5674 * Return value:
5675 * none
5676 **/
5677static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5678{
5679 u32 rc, ioasc;
5680 unsigned long scratch = ipr_cmd->u.scratch;
5681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5682
5683 do {
5684 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5685
5686 if (ioa_cfg->reset_cmd != ipr_cmd) {
5687 /*
5688 * We are doing nested adapter resets and this is
5689 * not the current reset job.
5690 */
5691 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5692 return;
5693 }
5694
5695 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5696 dev_err(&ioa_cfg->pdev->dev,
5697 "0x%02X failed with IOASC: 0x%08X\n",
5698 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5699
5700 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5701 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5702 return;
5703 }
5704
5705 ipr_reinit_ipr_cmnd(ipr_cmd);
5706 ipr_cmd->u.scratch = scratch;
5707 rc = ipr_cmd->job_step(ipr_cmd);
5708 } while (rc == IPR_RC_JOB_CONTINUE);
5709}
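
/*
 * Illustrative sketch only: each job step above follows the same
 * contract. Returning IPR_RC_JOB_CONTINUE makes ipr_reset_ioa_job()
 * call the next job_step immediately; returning IPR_RC_JOB_RETURN
 * means the step armed a timer or issued a command and the job will
 * be re-entered asynchronously. A hypothetical pass-through step:
 */
static int ipr_sketch_job_step(struct ipr_cmnd *ipr_cmd)
{
	/* Chain to the next step and let the router invoke it */
	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}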
5710
5711/**
5712 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5713 * @ioa_cfg: ioa config struct
5714 * @job_step: first job step of reset job
5715 * @shutdown_type: shutdown type
5716 *
5717 * Description: This function will initiate the reset of the given adapter
5718 * starting at the selected job step.
5719 * If the caller needs to wait on the completion of the reset,
5720 * the caller must sleep on the reset_wait_q.
5721 *
5722 * Return value:
5723 * none
5724 **/
5725static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5726 int (*job_step) (struct ipr_cmnd *),
5727 enum ipr_shutdown_type shutdown_type)
5728{
5729 struct ipr_cmnd *ipr_cmd;
5730
5731 ioa_cfg->in_reset_reload = 1;
5732 ioa_cfg->allow_cmds = 0;
5733 scsi_block_requests(ioa_cfg->host);
5734
5735 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5736 ioa_cfg->reset_cmd = ipr_cmd;
5737 ipr_cmd->job_step = job_step;
5738 ipr_cmd->u.shutdown_type = shutdown_type;
5739
5740 ipr_reset_ioa_job(ipr_cmd);
5741}
5742
5743/**
5744 * ipr_initiate_ioa_reset - Initiate an adapter reset
5745 * @ioa_cfg: ioa config struct
5746 * @shutdown_type: shutdown type
5747 *
5748 * Description: This function will initiate the reset of the given adapter.
5749 * If the caller needs to wait on the completion of the reset,
5750 * the caller must sleep on the reset_wait_q.
5751 *
5752 * Return value:
5753 * none
5754 **/
5755static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5756 enum ipr_shutdown_type shutdown_type)
5757{
5758 if (ioa_cfg->ioa_is_dead)
5759 return;
5760
5761 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5762 ioa_cfg->sdt_state = ABORT_DUMP;
5763
5764 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5765 dev_err(&ioa_cfg->pdev->dev,
5766 "IOA taken offline - error recovery failed\n");
5767
5768 ioa_cfg->reset_retries = 0;
5769 ioa_cfg->ioa_is_dead = 1;
5770
5771 if (ioa_cfg->in_ioa_bringdown) {
5772 ioa_cfg->reset_cmd = NULL;
5773 ioa_cfg->in_reset_reload = 0;
5774 ipr_fail_all_ops(ioa_cfg);
5775 wake_up_all(&ioa_cfg->reset_wait_q);
5776
5777 spin_unlock_irq(ioa_cfg->host->host_lock);
5778 scsi_unblock_requests(ioa_cfg->host);
5779 spin_lock_irq(ioa_cfg->host->host_lock);
5780 return;
5781 } else {
5782 ioa_cfg->in_ioa_bringdown = 1;
5783 shutdown_type = IPR_SHUTDOWN_NONE;
5784 }
5785 }
5786
5787 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5788 shutdown_type);
5789}
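
/*
 * Illustrative sketch only: as the comments above note, a caller that
 * must wait for the reset to finish sleeps on reset_wait_q. Assuming
 * the caller does not already hold host_lock, the pattern used
 * elsewhere in this driver is:
 */
static void ipr_sketch_reset_and_wait(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}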
5790
5791/**
5792 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5793 * @ioa_cfg: ioa cfg struct
5794 *
5795 * Description: This is the second phase of adapter initialization.
5796 * This function takes care of initializing the adapter to the point
5797 * where it can accept new commands.
5798 *
5799 * Return value:
5800 * 0 on success / -EIO on failure
5801 **/
5802static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5803{
5804 int rc = 0;
5805 unsigned long host_lock_flags = 0;
5806
5807 ENTER;
5808 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5809 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5810 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5811
5812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5813 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5814 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5815
5816 if (ioa_cfg->ioa_is_dead) {
5817 rc = -EIO;
5818 } else if (ipr_invalid_adapter(ioa_cfg)) {
5819 if (!ipr_testmode)
5820 rc = -EIO;
5821
5822 dev_err(&ioa_cfg->pdev->dev,
5823 "Adapter not supported in this hardware configuration.\n");
5824 }
5825
5826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5827
5828 LEAVE;
5829 return rc;
5830}
5831
5832/**
5833 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5834 * @ioa_cfg: ioa config struct
5835 *
5836 * Return value:
5837 * none
5838 **/
5839static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5840{
5841 int i;
5842
5843 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5844 if (ioa_cfg->ipr_cmnd_list[i])
5845 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5846 ioa_cfg->ipr_cmnd_list[i],
5847 ioa_cfg->ipr_cmnd_list_dma[i]);
5848
5849 ioa_cfg->ipr_cmnd_list[i] = NULL;
5850 }
5851
5852 if (ioa_cfg->ipr_cmd_pool)
5853 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
5854
5855 ioa_cfg->ipr_cmd_pool = NULL;
5856}
5857
5858/**
5859 * ipr_free_mem - Frees memory allocated for an adapter
5860 * @ioa_cfg: ioa cfg struct
5861 *
5862 * Return value:
5863 * nothing
5864 **/
5865static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5866{
5867 int i;
5868
5869 kfree(ioa_cfg->res_entries);
5870 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5871 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5872 ipr_free_cmd_blks(ioa_cfg);
5873 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5874 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5875 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5876 ioa_cfg->cfg_table,
5877 ioa_cfg->cfg_table_dma);
5878
5879 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5880 pci_free_consistent(ioa_cfg->pdev,
5881 sizeof(struct ipr_hostrcb),
5882 ioa_cfg->hostrcb[i],
5883 ioa_cfg->hostrcb_dma[i]);
5884 }
5885
5886 ipr_free_dump(ioa_cfg);
5887 kfree(ioa_cfg->saved_mode_pages);
5888 kfree(ioa_cfg->trace);
5889}
5890
5891/**
5892 * ipr_free_all_resources - Free all allocated resources for an adapter.
5893 * @ioa_cfg: ioa config struct
5894 *
5895 * This function frees all allocated resources for the
5896 * specified adapter.
5897 *
5898 * Return value:
5899 * none
5900 **/
5901static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5902{
5903 struct pci_dev *pdev = ioa_cfg->pdev;
5904
5905 ENTER;
5906 free_irq(pdev->irq, ioa_cfg);
5907 iounmap(ioa_cfg->hdw_dma_regs);
5908 pci_release_regions(pdev);
5909 ipr_free_mem(ioa_cfg);
5910 scsi_host_put(ioa_cfg->host);
5911 pci_disable_device(pdev);
5912 LEAVE;
5913}
5914
5915/**
5916 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5917 * @ioa_cfg: ioa config struct
5918 *
5919 * Return value:
5920 * 0 on success / -ENOMEM on allocation failure
5921 **/
5922static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5923{
5924 struct ipr_cmnd *ipr_cmd;
5925 struct ipr_ioarcb *ioarcb;
5926 dma_addr_t dma_addr;
5927 int i;
5928
5929 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
5930 sizeof(struct ipr_cmnd), 8, 0);
5931
5932 if (!ioa_cfg->ipr_cmd_pool)
5933 return -ENOMEM;
5934
5935 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5936 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5937
5938 if (!ipr_cmd) {
5939 ipr_free_cmd_blks(ioa_cfg);
5940 return -ENOMEM;
5941 }
5942
5943 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5944 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5945 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5946
5947 ioarcb = &ipr_cmd->ioarcb;
5948 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5949 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5950 ioarcb->write_ioadl_addr =
5951 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5952 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5953 ioarcb->ioasa_host_pci_addr =
5954 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5955 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5956 ipr_cmd->cmd_index = i;
5957 ipr_cmd->ioa_cfg = ioa_cfg;
5958 ipr_cmd->sense_buffer_dma = dma_addr +
5959 offsetof(struct ipr_cmnd, sense_buffer);
5960
5961 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5962 }
5963
5964 return 0;
5965}
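
/*
 * Illustrative sketch only: the pci_pool lifecycle used above in
 * miniature -- create a pool of fixed-size DMA-able blocks, carve one
 * out, then release both the block and the pool. "ipr-sketch" is a
 * made-up pool name.
 */
static int ipr_sketch_pool_usage(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	dma_addr_t dma;
	void *virt;

	pool = pci_pool_create("ipr-sketch", pdev, 64, 8, 0);
	if (!pool)
		return -ENOMEM;

	virt = pci_pool_alloc(pool, SLAB_KERNEL, &dma);
	if (virt)
		pci_pool_free(pool, virt, dma);

	pci_pool_destroy(pool);
	return virt ? 0 : -ENOMEM;
}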
5966
5967/**
5968 * ipr_alloc_mem - Allocate memory for an adapter
5969 * @ioa_cfg: ioa config struct
5970 *
5971 * Return value:
5972 * 0 on success / non-zero for error
5973 **/
5974static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5975{
5976 struct pci_dev *pdev = ioa_cfg->pdev;
5977 int i, rc = -ENOMEM;
5978
5979 ENTER;
0bc42e35 5980 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
5981 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5982
5983 if (!ioa_cfg->res_entries)
5984 goto out;
5985
5986 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5987 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5988
5989 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5990 sizeof(struct ipr_misc_cbs),
5991 &ioa_cfg->vpd_cbs_dma);
5992
5993 if (!ioa_cfg->vpd_cbs)
5994 goto out_free_res_entries;
5995
5996 if (ipr_alloc_cmd_blks(ioa_cfg))
5997 goto out_free_vpd_cbs;
5998
5999 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6000 sizeof(u32) * IPR_NUM_CMD_BLKS,
6001 &ioa_cfg->host_rrq_dma);
6002
6003 if (!ioa_cfg->host_rrq)
6004 goto out_ipr_free_cmd_blocks;
6005
6006 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6007 sizeof(struct ipr_config_table),
6008 &ioa_cfg->cfg_table_dma);
6009
6010 if (!ioa_cfg->cfg_table)
6011 goto out_free_host_rrq;
6012
6013 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6014 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6015 sizeof(struct ipr_hostrcb),
6016 &ioa_cfg->hostrcb_dma[i]);
6017
6018 if (!ioa_cfg->hostrcb[i])
6019 goto out_free_hostrcb_dma;
6020
6021 ioa_cfg->hostrcb[i]->hostrcb_dma =
6022 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6023 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6024 }
6025
0bc42e35 6026 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6027 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6028
6029 if (!ioa_cfg->trace)
6030 goto out_free_hostrcb_dma;
6031
6032 rc = 0;
6033out:
6034 LEAVE;
6035 return rc;
6036
6037out_free_hostrcb_dma:
6038 while (i-- > 0) {
6039 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6040 ioa_cfg->hostrcb[i],
6041 ioa_cfg->hostrcb_dma[i]);
6042 }
6043 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6044 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6045out_free_host_rrq:
6046 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6047 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6048out_ipr_free_cmd_blocks:
6049 ipr_free_cmd_blks(ioa_cfg);
6050out_free_vpd_cbs:
6051 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6052 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6053out_free_res_entries:
6054 kfree(ioa_cfg->res_entries);
6055 goto out;
6056}
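
/*
 * Illustrative sketch only: ipr_alloc_mem() above uses the standard
 * kernel goto-unwind idiom -- each allocation failure jumps to a label
 * that releases everything acquired so far, in reverse order. Reduced
 * to two resources:
 */
static int ipr_sketch_goto_unwind(void)
{
	void *a, *b;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(64, GFP_KERNEL);
	if (!b)
		goto out_free_a;

	/* ... use a and b, then release both on the success path ... */
	kfree(b);
	kfree(a);
	return 0;

out_free_a:
	kfree(a);
	return -ENOMEM;
}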
6057
6058/**
6059 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6060 * @ioa_cfg: ioa config struct
6061 *
6062 * Return value:
6063 * none
6064 **/
6065static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6066{
6067 int i;
6068
6069 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6070 ioa_cfg->bus_attr[i].bus = i;
6071 ioa_cfg->bus_attr[i].qas_enabled = 0;
6072 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6073 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6074 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6075 else
6076 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6077 }
6078}
6079
6080/**
6081 * ipr_init_ioa_cfg - Initialize IOA config struct
6082 * @ioa_cfg: ioa config struct
6083 * @host: scsi host struct
6084 * @pdev: PCI dev struct
6085 *
6086 * Return value:
6087 * none
6088 **/
6089static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6090 struct Scsi_Host *host, struct pci_dev *pdev)
6091{
6092 const struct ipr_interrupt_offsets *p;
6093 struct ipr_interrupts *t;
6094 void __iomem *base;
6095
6096 ioa_cfg->host = host;
6097 ioa_cfg->pdev = pdev;
6098 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 6099 ioa_cfg->doorbell = IPR_DOORBELL;
6100 if (!ipr_auto_create)
6101 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6102 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6103 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6104 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6105 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6106 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6107 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6108 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6109 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6110
6111 INIT_LIST_HEAD(&ioa_cfg->free_q);
6112 INIT_LIST_HEAD(&ioa_cfg->pending_q);
6113 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6114 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6115 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6116 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6117 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6118 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6119 ioa_cfg->sdt_state = INACTIVE;
6120 if (ipr_enable_cache)
6121 ioa_cfg->cache_state = CACHE_ENABLED;
6122 else
6123 ioa_cfg->cache_state = CACHE_DISABLED;
6124
6125 ipr_initialize_bus_attr(ioa_cfg);
6126
6127 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6128 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6129 host->max_channel = IPR_MAX_BUS_TO_SCAN;
6130 host->unique_id = host->host_no;
6131 host->max_cmd_len = IPR_MAX_CDB_LEN;
6132 pci_set_drvdata(pdev, ioa_cfg);
6133
6134 p = &ioa_cfg->chip_cfg->regs;
6135 t = &ioa_cfg->regs;
6136 base = ioa_cfg->hdw_dma_regs;
6137
6138 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6139 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6140 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6141 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6142 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6143 t->ioarrin_reg = base + p->ioarrin_reg;
6144 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6145 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6146 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6147}
6148
6149/**
6150 * ipr_get_chip_cfg - Find adapter chip configuration
6151 * @dev_id: PCI device id struct
6152 *
6153 * Return value:
6154 * ptr to chip config on success / NULL on failure
6155 **/
6156static const struct ipr_chip_cfg_t * __devinit
6157ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6158{
6159 int i;
6160
6161 if (dev_id->driver_data)
6162 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6163
6164 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6165 if (ipr_chip[i].vendor == dev_id->vendor &&
6166 ipr_chip[i].device == dev_id->device)
6167 return ipr_chip[i].cfg;
6168 return NULL;
6169}
6170
6171/**
6172 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6173 * @pdev: PCI device struct
6174 * @dev_id: PCI device id struct
6175 *
6176 * Return value:
6177 * 0 on success / non-zero on failure
6178 **/
6179static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6180 const struct pci_device_id *dev_id)
6181{
6182 struct ipr_ioa_cfg *ioa_cfg;
6183 struct Scsi_Host *host;
6184 unsigned long ipr_regs_pci;
6185 void __iomem *ipr_regs;
6186 u32 rc = PCIBIOS_SUCCESSFUL;
6187
6188 ENTER;
6189
6190 if ((rc = pci_enable_device(pdev))) {
6191 dev_err(&pdev->dev, "Cannot enable adapter\n");
6192 goto out;
6193 }
6194
6195 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6196
6197 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6198
6199 if (!host) {
6200 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6201 rc = -ENOMEM;
6202 goto out_disable;
6203 }
6204
6205 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6206 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6207
6208 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6209
6210 if (!ioa_cfg->chip_cfg) {
6211 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6212 dev_id->vendor, dev_id->device);
6213 goto out_scsi_host_put;
6214 }
6215
6216 ipr_regs_pci = pci_resource_start(pdev, 0);
6217
6218 rc = pci_request_regions(pdev, IPR_NAME);
6219 if (rc < 0) {
6220 dev_err(&pdev->dev,
6221 "Couldn't register memory range of registers\n");
6222 goto out_scsi_host_put;
6223 }
6224
6225 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6226
6227 if (!ipr_regs) {
6228 dev_err(&pdev->dev,
6229 "Couldn't map memory range of registers\n");
6230 rc = -ENOMEM;
6231 goto out_release_regions;
6232 }
6233
6234 ioa_cfg->hdw_dma_regs = ipr_regs;
6235 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6236 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6237
6238 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6239
6240 pci_set_master(pdev);
6241
6242 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6243 if (rc < 0) {
6244 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6245 goto cleanup_nomem;
6246 }
6247
6248 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6249 ioa_cfg->chip_cfg->cache_line_size);
6250
6251 if (rc != PCIBIOS_SUCCESSFUL) {
6252 dev_err(&pdev->dev, "Write of cache line size failed\n");
6253 rc = -EIO;
6254 goto cleanup_nomem;
6255 }
6256
6257 /* Save away PCI config space for use following IOA reset */
6258 rc = pci_save_state(pdev);
6259
6260 if (rc != PCIBIOS_SUCCESSFUL) {
6261 dev_err(&pdev->dev, "Failed to save PCI config space\n");
6262 rc = -EIO;
6263 goto cleanup_nomem;
6264 }
6265
6266 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6267 goto cleanup_nomem;
6268
6269 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6270 goto cleanup_nomem;
6271
6272 rc = ipr_alloc_mem(ioa_cfg);
6273 if (rc < 0) {
6274 dev_err(&pdev->dev,
6275 "Couldn't allocate enough memory for device driver!\n");
6276 goto cleanup_nomem;
6277 }
6278
6279 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6280 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6281
6282 if (rc) {
6283 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6284 pdev->irq, rc);
6285 goto cleanup_nolog;
6286 }
6287
6288 spin_lock(&ipr_driver_lock);
6289 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6290 spin_unlock(&ipr_driver_lock);
6291
6292 LEAVE;
6293out:
6294 return rc;
6295
6296cleanup_nolog:
6297 ipr_free_mem(ioa_cfg);
6298cleanup_nomem:
6299 iounmap(ipr_regs);
6300out_release_regions:
6301 pci_release_regions(pdev);
6302out_scsi_host_put:
6303 scsi_host_put(host);
6304out_disable:
6305 pci_disable_device(pdev);
6306 goto out;
6307}
6308
6309/**
6310 * ipr_scan_vsets - Scans for VSET devices
6311 * @ioa_cfg: ioa config struct
6312 *
6313 * Description: Since the VSET resources do not follow SAM in that we can have
6314 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6315 *
6316 * Return value:
6317 * none
6318 **/
6319static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6320{
6321 int target, lun;
6322
6323 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6324 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6325 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6326}
6327
6328/**
6329 * ipr_initiate_ioa_bringdown - Bring down an adapter
6330 * @ioa_cfg: ioa config struct
6331 * @shutdown_type: shutdown type
6332 *
6333 * Description: This function will initiate bringing down the adapter.
6334 * This consists of issuing an IOA shutdown to the adapter
6335 * to flush the cache, and running BIST.
6336 * If the caller needs to wait on the completion of the reset,
6337 * the caller must sleep on the reset_wait_q.
6338 *
6339 * Return value:
6340 * none
6341 **/
6342static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6343 enum ipr_shutdown_type shutdown_type)
6344{
6345 ENTER;
6346 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6347 ioa_cfg->sdt_state = ABORT_DUMP;
6348 ioa_cfg->reset_retries = 0;
6349 ioa_cfg->in_ioa_bringdown = 1;
6350 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6351 LEAVE;
6352}
6353
6354/**
6355 * __ipr_remove - Remove a single adapter
6356 * @pdev: pci device struct
6357 *
6358 * Adapter hot plug remove entry point.
6359 *
6360 * Return value:
6361 * none
6362 **/
6363static void __ipr_remove(struct pci_dev *pdev)
6364{
6365 unsigned long host_lock_flags = 0;
6366 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6367 ENTER;
6368
6369 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6370 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6371
6372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6373 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 6374 flush_scheduled_work();
6375 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6376
6377 spin_lock(&ipr_driver_lock);
6378 list_del(&ioa_cfg->queue);
6379 spin_unlock(&ipr_driver_lock);
6380
6381 if (ioa_cfg->sdt_state == ABORT_DUMP)
6382 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6383 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6384
6385 ipr_free_all_resources(ioa_cfg);
6386
6387 LEAVE;
6388}
6389
6390/**
6391 * ipr_remove - IOA hot plug remove entry point
6392 * @pdev: pci device struct
6393 *
6394 * Adapter hot plug remove entry point.
6395 *
6396 * Return value:
6397 * none
6398 **/
6399static void ipr_remove(struct pci_dev *pdev)
6400{
6401 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6402
6403 ENTER;
6404
6405 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6406 &ipr_trace_attr);
6407 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6408 &ipr_dump_attr);
6409 scsi_remove_host(ioa_cfg->host);
6410
6411 __ipr_remove(pdev);
6412
6413 LEAVE;
6414}
6415
6416/**
6417 * ipr_probe - Adapter hot plug add entry point
6418 *
6419 * Return value:
6420 * 0 on success / non-zero on failure
6421 **/
6422static int __devinit ipr_probe(struct pci_dev *pdev,
6423 const struct pci_device_id *dev_id)
6424{
6425 struct ipr_ioa_cfg *ioa_cfg;
6426 int rc;
6427
6428 rc = ipr_probe_ioa(pdev, dev_id);
6429
6430 if (rc)
6431 return rc;
6432
6433 ioa_cfg = pci_get_drvdata(pdev);
6434 rc = ipr_probe_ioa_part2(ioa_cfg);
6435
6436 if (rc) {
6437 __ipr_remove(pdev);
6438 return rc;
6439 }
6440
6441 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6442
6443 if (rc) {
6444 __ipr_remove(pdev);
6445 return rc;
6446 }
6447
6448 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6449 &ipr_trace_attr);
6450
6451 if (rc) {
6452 scsi_remove_host(ioa_cfg->host);
6453 __ipr_remove(pdev);
6454 return rc;
6455 }
6456
6457 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6458 &ipr_dump_attr);
6459
6460 if (rc) {
6461 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6462 &ipr_trace_attr);
6463 scsi_remove_host(ioa_cfg->host);
6464 __ipr_remove(pdev);
6465 return rc;
6466 }
6467
6468 scsi_scan_host(ioa_cfg->host);
6469 ipr_scan_vsets(ioa_cfg);
6470 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6471 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 6472 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6473 schedule_work(&ioa_cfg->work_q);
6474 return 0;
6475}
6476
6477/**
6478 * ipr_shutdown - Shutdown handler.
d18c3db5 6479 * @pdev: pci device struct
6480 *
6481 * This function is invoked upon system shutdown/reboot. It will issue
6482 * an adapter shutdown to the adapter to flush the write cache.
6483 *
6484 * Return value:
6485 * none
6486 **/
d18c3db5 6487static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 6488{
d18c3db5 6489 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6490 unsigned long lock_flags = 0;
6491
6492 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6493 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6495 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6496}
6497
6498static struct pci_device_id ipr_pci_table[] __devinitdata = {
6499 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6500 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6501 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6502 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6503 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6504 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6505 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6506 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6507 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6508 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6509 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6510 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6511 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6512 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6513 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6514 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6515 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6516 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6517 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6518 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6519 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6520 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6521 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6522 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6523 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6524 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6525 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6526 { }
6527};
6528MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6529
6530static struct pci_driver ipr_driver = {
6531 .name = IPR_NAME,
6532 .id_table = ipr_pci_table,
6533 .probe = ipr_probe,
6534 .remove = ipr_remove,
d18c3db5 6535 .shutdown = ipr_shutdown,
6536};
6537
6538/**
6539 * ipr_init - Module entry point
6540 *
6541 * Return value:
6542 * 0 on success / negative value on failure
6543 **/
6544static int __init ipr_init(void)
6545{
6546 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6547 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6548
6549 return pci_module_init(&ipr_driver);
6550}
6551
6552/**
6553 * ipr_exit - Module unload
6554 *
6555 * Module unload entry point.
6556 *
6557 * Return value:
6558 * none
6559 **/
6560static void __exit ipr_exit(void)
6561{
6562 pci_unregister_driver(&ipr_driver);
6563}
6564
6565module_init(ipr_init);
6566module_exit(ipr_exit);