[SCSI] ipr: Simplify status area dumping
drivers/scsi/ipr.c
1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57 #include <linux/config.h>
58 #include <linux/fs.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
74 #include <asm/io.h>
75 #include <asm/irq.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
83 #include "ipr.h"
84
85 /*
86 * Global Data
87 */
88 static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
89 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90 static unsigned int ipr_max_speed = 1;
91 static int ipr_testmode = 0;
92 static unsigned int ipr_fastfail = 0;
93 static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
94 static unsigned int ipr_enable_cache = 1;
95 static unsigned int ipr_debug = 0;
96 static int ipr_auto_create = 1;
97 static DEFINE_SPINLOCK(ipr_driver_lock);
98
99 /* This table describes the differences between DMA controller chips */
100 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101 { /* Gemstone, Citrine, and Obsidian */
102 .mailbox = 0x0042C,
103 .cache_line_size = 0x20,
104 {
105 .set_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_mask_reg = 0x00230,
107 .sense_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_reg = 0x00228,
109 .sense_interrupt_reg = 0x00224,
110 .ioarrin_reg = 0x00404,
111 .sense_uproc_interrupt_reg = 0x00214,
112 .set_uproc_interrupt_reg = 0x00214,
113 .clr_uproc_interrupt_reg = 0x00218
114 }
115 },
116 { /* Snipe and Scamp */
117 .mailbox = 0x0052C,
118 .cache_line_size = 0x20,
119 {
120 .set_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_mask_reg = 0x0028C,
122 .sense_interrupt_mask_reg = 0x00288,
123 .clr_interrupt_reg = 0x00284,
124 .sense_interrupt_reg = 0x00280,
125 .ioarrin_reg = 0x00504,
126 .sense_uproc_interrupt_reg = 0x00290,
127 .set_uproc_interrupt_reg = 0x00290,
128 .clr_uproc_interrupt_reg = 0x00294
129 }
130 },
131 };
132
133 static const struct ipr_chip_t ipr_chip[] = {
134 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140 };
141
142 static int ipr_max_bus_speeds [] = {
143 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
144 };
145
146 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
147 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
148 module_param_named(max_speed, ipr_max_speed, uint, 0);
149 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
150 module_param_named(log_level, ipr_log_level, uint, 0);
151 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
152 module_param_named(testmode, ipr_testmode, int, 0);
153 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
154 module_param_named(fastfail, ipr_fastfail, int, 0);
155 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
156 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
157 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
158 module_param_named(enable_cache, ipr_enable_cache, int, 0);
159 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
160 module_param_named(debug, ipr_debug, int, 0);
161 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
162 module_param_named(auto_create, ipr_auto_create, int, 0);
163 MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
164 MODULE_LICENSE("GPL");
165 MODULE_VERSION(IPR_DRIVER_VERSION);
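/*
 * Example module load (hypothetical values) for the parameters declared
 * above; any subset of them may be given on the modprobe command line:
 *
 *   modprobe ipr max_speed=2 log_level=2 fastfail=1 transop_timeout=300
 */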
166
167 /* A constant array of IOASCs/URCs/Error Messages */
168 static const
169 struct ipr_error_table_t ipr_error_table[] = {
170 {0x00000000, 1, 1,
171 "8155: An unknown error was received"},
172 {0x00330000, 0, 0,
173 "Soft underlength error"},
174 {0x005A0000, 0, 0,
175 "Command to be cancelled not found"},
176 {0x00808000, 0, 0,
177 "Qualified success"},
178 {0x01080000, 1, 1,
179 "FFFE: Soft device bus error recovered by the IOA"},
180 {0x01170600, 0, 1,
181 "FFF9: Device sector reassign successful"},
182 {0x01170900, 0, 1,
183 "FFF7: Media error recovered by device rewrite procedures"},
184 {0x01180200, 0, 1,
185 "7001: IOA sector reassignment successful"},
186 {0x01180500, 0, 1,
187 "FFF9: Soft media error. Sector reassignment recommended"},
188 {0x01180600, 0, 1,
189 "FFF7: Media error recovered by IOA rewrite procedures"},
190 {0x01418000, 0, 1,
191 "FF3D: Soft PCI bus error recovered by the IOA"},
192 {0x01440000, 1, 1,
193 "FFF6: Device hardware error recovered by the IOA"},
194 {0x01448100, 0, 1,
195 "FFF6: Device hardware error recovered by the device"},
196 {0x01448200, 1, 1,
197 "FF3D: Soft IOA error recovered by the IOA"},
198 {0x01448300, 0, 1,
199 "FFFA: Undefined device response recovered by the IOA"},
200 {0x014A0000, 1, 1,
201 "FFF6: Device bus error, message or command phase"},
202 {0x015D0000, 0, 1,
203 "FFF6: Failure prediction threshold exceeded"},
204 {0x015D9200, 0, 1,
205 "8009: Impending cache battery pack failure"},
206 {0x02040400, 0, 0,
207 "34FF: Disk device format in progress"},
208 {0x023F0000, 0, 0,
209 "Synchronization required"},
210 {0x024E0000, 0, 0,
211 "No ready, IOA shutdown"},
212 {0x025A0000, 0, 0,
213 "Not ready, IOA has been shutdown"},
214 {0x02670100, 0, 1,
215 "3020: Storage subsystem configuration error"},
216 {0x03110B00, 0, 0,
217 "FFF5: Medium error, data unreadable, recommend reassign"},
218 {0x03110C00, 0, 0,
219 "7000: Medium error, data unreadable, do not reassign"},
220 {0x03310000, 0, 1,
221 "FFF3: Disk media format bad"},
222 {0x04050000, 0, 1,
223 "3002: Addressed device failed to respond to selection"},
224 {0x04080000, 1, 1,
225 "3100: Device bus error"},
226 {0x04080100, 0, 1,
227 "3109: IOA timed out a device command"},
228 {0x04088000, 0, 0,
229 "3120: SCSI bus is not operational"},
230 {0x04118000, 0, 1,
231 "9000: IOA reserved area data check"},
232 {0x04118100, 0, 1,
233 "9001: IOA reserved area invalid data pattern"},
234 {0x04118200, 0, 1,
235 "9002: IOA reserved area LRC error"},
236 {0x04320000, 0, 1,
237 "102E: Out of alternate sectors for disk storage"},
238 {0x04330000, 1, 1,
239 "FFF4: Data transfer underlength error"},
240 {0x04338000, 1, 1,
241 "FFF4: Data transfer overlength error"},
242 {0x043E0100, 0, 1,
243 "3400: Logical unit failure"},
244 {0x04408500, 0, 1,
245 "FFF4: Device microcode is corrupt"},
246 {0x04418000, 1, 1,
247 "8150: PCI bus error"},
248 {0x04430000, 1, 0,
249 "Unsupported device bus message received"},
250 {0x04440000, 1, 1,
251 "FFF4: Disk device problem"},
252 {0x04448200, 1, 1,
253 "8150: Permanent IOA failure"},
254 {0x04448300, 0, 1,
255 "3010: Disk device returned wrong response to IOA"},
256 {0x04448400, 0, 1,
257 "8151: IOA microcode error"},
258 {0x04448500, 0, 0,
259 "Device bus status error"},
260 {0x04448600, 0, 1,
261 "8157: IOA error requiring IOA reset to recover"},
262 {0x04490000, 0, 0,
263 "Message reject received from the device"},
264 {0x04449200, 0, 1,
265 "8008: A permanent cache battery pack failure occurred"},
266 {0x0444A000, 0, 1,
267 "9090: Disk unit has been modified after the last known status"},
268 {0x0444A200, 0, 1,
269 "9081: IOA detected device error"},
270 {0x0444A300, 0, 1,
271 "9082: IOA detected device error"},
272 {0x044A0000, 1, 1,
273 "3110: Device bus error, message or command phase"},
274 {0x04670400, 0, 1,
275 "9091: Incorrect hardware configuration change has been detected"},
276 {0x04678000, 0, 1,
277 "9073: Invalid multi-adapter configuration"},
278 {0x046E0000, 0, 1,
279 "FFF4: Command to logical unit failed"},
280 {0x05240000, 1, 0,
281 "Illegal request, invalid request type or request packet"},
282 {0x05250000, 0, 0,
283 "Illegal request, invalid resource handle"},
284 {0x05258000, 0, 0,
285 "Illegal request, commands not allowed to this device"},
286 {0x05258100, 0, 0,
287 "Illegal request, command not allowed to a secondary adapter"},
288 {0x05260000, 0, 0,
289 "Illegal request, invalid field in parameter list"},
290 {0x05260100, 0, 0,
291 "Illegal request, parameter not supported"},
292 {0x05260200, 0, 0,
293 "Illegal request, parameter value invalid"},
294 {0x052C0000, 0, 0,
295 "Illegal request, command sequence error"},
296 {0x052C8000, 1, 0,
297 "Illegal request, dual adapter support not enabled"},
298 {0x06040500, 0, 1,
299 "9031: Array protection temporarily suspended, protection resuming"},
300 {0x06040600, 0, 1,
301 "9040: Array protection temporarily suspended, protection resuming"},
302 {0x06290000, 0, 1,
303 "FFFB: SCSI bus was reset"},
304 {0x06290500, 0, 0,
305 "FFFE: SCSI bus transition to single ended"},
306 {0x06290600, 0, 0,
307 "FFFE: SCSI bus transition to LVD"},
308 {0x06298000, 0, 1,
309 "FFFB: SCSI bus was reset by another initiator"},
310 {0x063F0300, 0, 1,
311 "3029: A device replacement has occurred"},
312 {0x064C8000, 0, 1,
313 "9051: IOA cache data exists for a missing or failed device"},
314 {0x064C8100, 0, 1,
315 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
316 {0x06670100, 0, 1,
317 "9025: Disk unit is not supported at its physical location"},
318 {0x06670600, 0, 1,
319 "3020: IOA detected a SCSI bus configuration error"},
320 {0x06678000, 0, 1,
321 "3150: SCSI bus configuration error"},
322 {0x06678100, 0, 1,
323 "9074: Asymmetric advanced function disk configuration"},
324 {0x06690200, 0, 1,
325 "9041: Array protection temporarily suspended"},
326 {0x06698200, 0, 1,
327 "9042: Corrupt array parity detected on specified device"},
328 {0x066B0200, 0, 1,
329 "9030: Array no longer protected due to missing or failed disk unit"},
330 {0x066B8000, 0, 1,
331 "9071: Link operational transition"},
332 {0x066B8100, 0, 1,
333 "9072: Link not operational transition"},
334 {0x066B8200, 0, 1,
335 "9032: Array exposed but still protected"},
336 {0x07270000, 0, 0,
337 "Failure due to other device"},
338 {0x07278000, 0, 1,
339 "9008: IOA does not support functions expected by devices"},
340 {0x07278100, 0, 1,
341 "9010: Cache data associated with attached devices cannot be found"},
342 {0x07278200, 0, 1,
343 "9011: Cache data belongs to devices other than those attached"},
344 {0x07278400, 0, 1,
345 "9020: Array missing 2 or more devices with only 1 device present"},
346 {0x07278500, 0, 1,
347 "9021: Array missing 2 or more devices with 2 or more devices present"},
348 {0x07278600, 0, 1,
349 "9022: Exposed array is missing a required device"},
350 {0x07278700, 0, 1,
351 "9023: Array member(s) not at required physical locations"},
352 {0x07278800, 0, 1,
353 "9024: Array not functional due to present hardware configuration"},
354 {0x07278900, 0, 1,
355 "9026: Array not functional due to present hardware configuration"},
356 {0x07278A00, 0, 1,
357 "9027: Array is missing a device and parity is out of sync"},
358 {0x07278B00, 0, 1,
359 "9028: Maximum number of arrays already exist"},
360 {0x07278C00, 0, 1,
361 "9050: Required cache data cannot be located for a disk unit"},
362 {0x07278D00, 0, 1,
363 "9052: Cache data exists for a device that has been modified"},
364 {0x07278F00, 0, 1,
365 "9054: IOA resources not available due to previous problems"},
366 {0x07279100, 0, 1,
367 "9092: Disk unit requires initialization before use"},
368 {0x07279200, 0, 1,
369 "9029: Incorrect hardware configuration change has been detected"},
370 {0x07279600, 0, 1,
371 "9060: One or more disk pairs are missing from an array"},
372 {0x07279700, 0, 1,
373 "9061: One or more disks are missing from an array"},
374 {0x07279800, 0, 1,
375 "9062: One or more disks are missing from an array"},
376 {0x07279900, 0, 1,
377 "9063: Maximum number of functional arrays has been exceeded"},
378 {0x0B260000, 0, 0,
379 "Aborted command, invalid descriptor"},
380 {0x0B5A0000, 0, 0,
381 "Command terminated by host"}
382 };
383
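/*
 * In the table below, compare_product_id_byte selects which product ID
 * bytes are significant: an 'X' means the corresponding byte must match,
 * while any other character (e.g. '*') means the byte is ignored when
 * matching (see ipr_find_ses_entry()).
 */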
384 static const struct ipr_ses_table_entry ipr_ses_table[] = {
385 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
386 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
387 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
388 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
389 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
390 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
391 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
392 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
393 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
394 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
395 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
396 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
397 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
398 };
399
400 /*
401 * Function Prototypes
402 */
403 static int ipr_reset_alert(struct ipr_cmnd *);
404 static void ipr_process_ccn(struct ipr_cmnd *);
405 static void ipr_process_error(struct ipr_cmnd *);
406 static void ipr_reset_ioa_job(struct ipr_cmnd *);
407 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
408 enum ipr_shutdown_type);
409
410 #ifdef CONFIG_SCSI_IPR_TRACE
411 /**
412 * ipr_trc_hook - Add a trace entry to the driver trace
413 * @ipr_cmd: ipr command struct
414 * @type: trace type
415 * @add_data: additional data
416 *
417 * Return value:
418 * none
419 **/
420 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
421 u8 type, u32 add_data)
422 {
423 struct ipr_trace_entry *trace_entry;
424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
425
426 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
427 trace_entry->time = jiffies;
428 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
429 trace_entry->type = type;
430 trace_entry->cmd_index = ipr_cmd->cmd_index;
431 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
432 trace_entry->u.add_data = add_data;
433 }
434 #else
435 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
436 #endif
437
438 /**
439 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
440 * @ipr_cmd: ipr command struct
441 *
442 * Return value:
443 * none
444 **/
445 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
446 {
447 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
448 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
449
450 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
451 ioarcb->write_data_transfer_length = 0;
452 ioarcb->read_data_transfer_length = 0;
453 ioarcb->write_ioadl_len = 0;
454 ioarcb->read_ioadl_len = 0;
455 ioasa->ioasc = 0;
456 ioasa->residual_data_len = 0;
457
458 ipr_cmd->scsi_cmd = NULL;
459 ipr_cmd->sense_buffer[0] = 0;
460 ipr_cmd->dma_use_sg = 0;
461 }
462
463 /**
464 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
465 * @ipr_cmd: ipr command struct
466 *
467 * Return value:
468 * none
469 **/
470 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
471 {
472 ipr_reinit_ipr_cmnd(ipr_cmd);
473 ipr_cmd->u.scratch = 0;
474 ipr_cmd->sibling = NULL;
475 init_timer(&ipr_cmd->timer);
476 }
477
478 /**
479 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
480 * @ioa_cfg: ioa config struct
481 *
482 * Return value:
483 * pointer to ipr command struct
484 **/
485 static
486 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
487 {
488 struct ipr_cmnd *ipr_cmd;
489
490 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
491 list_del(&ipr_cmd->queue);
492 ipr_init_ipr_cmnd(ipr_cmd);
493
494 return ipr_cmd;
495 }
496
497 /**
498 * ipr_unmap_sglist - Unmap scatterlist if mapped
499 * @ioa_cfg: ioa config struct
500 * @ipr_cmd: ipr command struct
501 *
502 * Return value:
503 * nothing
504 **/
505 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
506 struct ipr_cmnd *ipr_cmd)
507 {
508 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
509
510 if (ipr_cmd->dma_use_sg) {
511 if (scsi_cmd->use_sg > 0) {
512 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
513 scsi_cmd->use_sg,
514 scsi_cmd->sc_data_direction);
515 } else {
516 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
517 scsi_cmd->request_bufflen,
518 scsi_cmd->sc_data_direction);
519 }
520 }
521 }
522
523 /**
524 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
525 * @ioa_cfg: ioa config struct
526 * @clr_ints: interrupts to clear
527 *
528 * This function masks all interrupts on the adapter, then clears the
529 * interrupts specified in the mask
530 *
531 * Return value:
532 * none
533 **/
534 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
535 u32 clr_ints)
536 {
537 volatile u32 int_reg;
538
539 /* Stop new interrupts */
540 ioa_cfg->allow_interrupts = 0;
541
542 /* Set interrupt mask to stop all new interrupts */
543 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
544
545 /* Clear any pending interrupts */
546 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
547 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
548 }
549
550 /**
551 * ipr_save_pcix_cmd_reg - Save PCI-X command register
552 * @ioa_cfg: ioa config struct
553 *
554 * Return value:
555 * 0 on success / -EIO on failure
556 **/
557 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
558 {
559 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
560
561 if (pcix_cmd_reg == 0) {
562 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
563 return -EIO;
564 }
565
566 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
567 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
568 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
569 return -EIO;
570 }
571
572 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
573 return 0;
574 }
575
576 /**
577 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
578 * @ioa_cfg: ioa config struct
579 *
580 * Return value:
581 * 0 on success / -EIO on failure
582 **/
583 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
584 {
585 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
586
587 if (pcix_cmd_reg) {
588 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
589 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
590 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
591 return -EIO;
592 }
593 } else {
594 dev_err(&ioa_cfg->pdev->dev,
595 "Failed to setup PCI-X command register\n");
596 return -EIO;
597 }
598
599 return 0;
600 }
601
602 /**
603 * ipr_scsi_eh_done - mid-layer done function for aborted ops
604 * @ipr_cmd: ipr command struct
605 *
606 * This function is invoked by the interrupt handler for
607 * ops generated by the SCSI mid-layer which are being aborted.
608 *
609 * Return value:
610 * none
611 **/
612 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
613 {
614 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
615 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
616
617 scsi_cmd->result |= (DID_ERROR << 16);
618
619 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
620 scsi_cmd->scsi_done(scsi_cmd);
621 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
622 }
623
624 /**
625 * ipr_fail_all_ops - Fails all outstanding ops.
626 * @ioa_cfg: ioa config struct
627 *
628 * This function fails all outstanding ops.
629 *
630 * Return value:
631 * none
632 **/
633 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
634 {
635 struct ipr_cmnd *ipr_cmd, *temp;
636
637 ENTER;
638 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
639 list_del(&ipr_cmd->queue);
640
641 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
642 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
643
644 if (ipr_cmd->scsi_cmd)
645 ipr_cmd->done = ipr_scsi_eh_done;
646
647 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
648 del_timer(&ipr_cmd->timer);
649 ipr_cmd->done(ipr_cmd);
650 }
651
652 LEAVE;
653 }
654
655 /**
656 * ipr_do_req - Send driver initiated requests.
657 * @ipr_cmd: ipr command struct
658 * @done: done function
659 * @timeout_func: timeout function
660 * @timeout: timeout value
661 *
662 * This function sends the specified command to the adapter with the
663 * timeout given. The done function is invoked on command completion.
664 *
665 * Return value:
666 * none
667 **/
668 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
669 void (*done) (struct ipr_cmnd *),
670 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
671 {
672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
673
674 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
675
676 ipr_cmd->done = done;
677
678 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
679 ipr_cmd->timer.expires = jiffies + timeout;
680 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
681
682 add_timer(&ipr_cmd->timer);
683
684 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
685
686 mb();
687 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
688 ioa_cfg->regs.ioarrin_reg);
689 }
690
691 /**
692 * ipr_internal_cmd_done - Op done function for an internally generated op.
693 * @ipr_cmd: ipr command struct
694 *
695 * This function is the op done function for an internally generated,
696 * blocking op. It simply wakes the sleeping thread.
697 *
698 * Return value:
699 * none
700 **/
701 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
702 {
703 if (ipr_cmd->sibling)
704 ipr_cmd->sibling = NULL;
705 else
706 complete(&ipr_cmd->completion);
707 }
708
709 /**
710 * ipr_send_blocking_cmd - Send command and sleep on its completion.
711 * @ipr_cmd: ipr command struct
712 * @timeout_func: function to invoke if command times out
713 * @timeout: timeout
714 *
715 * Return value:
716 * none
717 **/
718 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
719 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
720 u32 timeout)
721 {
722 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
723
724 init_completion(&ipr_cmd->completion);
725 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
726
727 spin_unlock_irq(ioa_cfg->host->host_lock);
728 wait_for_completion(&ipr_cmd->completion);
729 spin_lock_irq(ioa_cfg->host->host_lock);
730 }
731
732 /**
733 * ipr_send_hcam - Send an HCAM to the adapter.
734 * @ioa_cfg: ioa config struct
735 * @type: HCAM type
736 * @hostrcb: hostrcb struct
737 *
738 * This function will send a Host Controlled Async command to the adapter.
739 * If HCAMs are currently not allowed to be issued to the adapter, it will
740 * place the hostrcb on the free queue.
741 *
742 * Return value:
743 * none
744 **/
745 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
746 struct ipr_hostrcb *hostrcb)
747 {
748 struct ipr_cmnd *ipr_cmd;
749 struct ipr_ioarcb *ioarcb;
750
751 if (ioa_cfg->allow_cmds) {
752 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
753 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
754 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
755
756 ipr_cmd->u.hostrcb = hostrcb;
757 ioarcb = &ipr_cmd->ioarcb;
758
759 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
760 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
761 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
762 ioarcb->cmd_pkt.cdb[1] = type;
763 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
764 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
765
766 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
767 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
768 ipr_cmd->ioadl[0].flags_and_data_len =
769 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
770 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
771
772 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
773 ipr_cmd->done = ipr_process_ccn;
774 else
775 ipr_cmd->done = ipr_process_error;
776
777 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
778
779 mb();
780 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
781 ioa_cfg->regs.ioarrin_reg);
782 } else {
783 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
784 }
785 }
786
787 /**
788 * ipr_init_res_entry - Initialize a resource entry struct.
789 * @res: resource entry struct
790 *
791 * Return value:
792 * none
793 **/
794 static void ipr_init_res_entry(struct ipr_resource_entry *res)
795 {
796 res->needs_sync_complete = 0;
797 res->in_erp = 0;
798 res->add_to_ml = 0;
799 res->del_from_ml = 0;
800 res->resetting_device = 0;
801 res->sdev = NULL;
802 }
803
804 /**
805 * ipr_handle_config_change - Handle a config change from the adapter
806 * @ioa_cfg: ioa config struct
807 * @hostrcb: hostrcb
808 *
809 * Return value:
810 * none
811 **/
812 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
813 struct ipr_hostrcb *hostrcb)
814 {
815 struct ipr_resource_entry *res = NULL;
816 struct ipr_config_table_entry *cfgte;
817 u32 is_ndn = 1;
818
819 cfgte = &hostrcb->hcam.u.ccn.cfgte;
820
821 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
822 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
823 sizeof(cfgte->res_addr))) {
824 is_ndn = 0;
825 break;
826 }
827 }
828
829 if (is_ndn) {
830 if (list_empty(&ioa_cfg->free_res_q)) {
831 ipr_send_hcam(ioa_cfg,
832 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
833 hostrcb);
834 return;
835 }
836
837 res = list_entry(ioa_cfg->free_res_q.next,
838 struct ipr_resource_entry, queue);
839
840 list_del(&res->queue);
841 ipr_init_res_entry(res);
842 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
843 }
844
845 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
846
847 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
848 if (res->sdev) {
849 res->del_from_ml = 1;
850 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
851 if (ioa_cfg->allow_ml_add_del)
852 schedule_work(&ioa_cfg->work_q);
853 } else
854 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
855 } else if (!res->sdev) {
856 res->add_to_ml = 1;
857 if (ioa_cfg->allow_ml_add_del)
858 schedule_work(&ioa_cfg->work_q);
859 }
860
861 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
862 }
863
864 /**
865 * ipr_process_ccn - Op done function for a CCN.
866 * @ipr_cmd: ipr command struct
867 *
868 * This function is the op done function for a configuration
869 * change notification host controlled async from the adapter.
870 *
871 * Return value:
872 * none
873 **/
874 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
875 {
876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
877 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
878 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
879
880 list_del(&hostrcb->queue);
881 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
882
883 if (ioasc) {
884 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
885 dev_err(&ioa_cfg->pdev->dev,
886 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
887
888 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
889 } else {
890 ipr_handle_config_change(ioa_cfg, hostrcb);
891 }
892 }
893
894 /**
895 * ipr_log_vpd - Log the passed VPD to the error log.
896 * @vpd: vendor/product id/sn struct
897 *
898 * Return value:
899 * none
900 **/
901 static void ipr_log_vpd(struct ipr_vpd *vpd)
902 {
903 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
904 + IPR_SERIAL_NUM_LEN];
905
906 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
907 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
908 IPR_PROD_ID_LEN);
909 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
910 ipr_err("Vendor/Product ID: %s\n", buffer);
911
912 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
913 buffer[IPR_SERIAL_NUM_LEN] = '\0';
914 ipr_err(" Serial Number: %s\n", buffer);
915 }
916
917 /**
918 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
919 * @vpd: vendor/product id/sn/wwn struct
920 *
921 * Return value:
922 * none
923 **/
924 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
925 {
926 ipr_log_vpd(&vpd->vpd);
927 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
928 be32_to_cpu(vpd->wwid[1]));
929 }
930
931 /**
932 * ipr_log_enhanced_cache_error - Log a cache error.
933 * @ioa_cfg: ioa config struct
934 * @hostrcb: hostrcb struct
935 *
936 * Return value:
937 * none
938 **/
939 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
940 struct ipr_hostrcb *hostrcb)
941 {
942 struct ipr_hostrcb_type_12_error *error =
943 &hostrcb->hcam.u.error.u.type_12_error;
944
945 ipr_err("-----Current Configuration-----\n");
946 ipr_err("Cache Directory Card Information:\n");
947 ipr_log_ext_vpd(&error->ioa_vpd);
948 ipr_err("Adapter Card Information:\n");
949 ipr_log_ext_vpd(&error->cfc_vpd);
950
951 ipr_err("-----Expected Configuration-----\n");
952 ipr_err("Cache Directory Card Information:\n");
953 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
954 ipr_err("Adapter Card Information:\n");
955 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
956
957 ipr_err("Additional IOA Data: %08X %08X %08X\n",
958 be32_to_cpu(error->ioa_data[0]),
959 be32_to_cpu(error->ioa_data[1]),
960 be32_to_cpu(error->ioa_data[2]));
961 }
962
963 /**
964 * ipr_log_cache_error - Log a cache error.
965 * @ioa_cfg: ioa config struct
966 * @hostrcb: hostrcb struct
967 *
968 * Return value:
969 * none
970 **/
971 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
972 struct ipr_hostrcb *hostrcb)
973 {
974 struct ipr_hostrcb_type_02_error *error =
975 &hostrcb->hcam.u.error.u.type_02_error;
976
977 ipr_err("-----Current Configuration-----\n");
978 ipr_err("Cache Directory Card Information:\n");
979 ipr_log_vpd(&error->ioa_vpd);
980 ipr_err("Adapter Card Information:\n");
981 ipr_log_vpd(&error->cfc_vpd);
982
983 ipr_err("-----Expected Configuration-----\n");
984 ipr_err("Cache Directory Card Information:\n");
985 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
986 ipr_err("Adapter Card Information:\n");
987 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
988
989 ipr_err("Additional IOA Data: %08X %08X %08X\n",
990 be32_to_cpu(error->ioa_data[0]),
991 be32_to_cpu(error->ioa_data[1]),
992 be32_to_cpu(error->ioa_data[2]));
993 }
994
995 /**
996 * ipr_log_enhanced_config_error - Log a configuration error.
997 * @ioa_cfg: ioa config struct
998 * @hostrcb: hostrcb struct
999 *
1000 * Return value:
1001 * none
1002 **/
1003 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1004 struct ipr_hostrcb *hostrcb)
1005 {
1006 int errors_logged, i;
1007 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1008 struct ipr_hostrcb_type_13_error *error;
1009
1010 error = &hostrcb->hcam.u.error.u.type_13_error;
1011 errors_logged = be32_to_cpu(error->errors_logged);
1012
1013 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1014 be32_to_cpu(error->errors_detected), errors_logged);
1015
1016 dev_entry = error->dev;
1017
1018 for (i = 0; i < errors_logged; i++, dev_entry++) {
1019 ipr_err_separator;
1020
1021 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1022 ipr_log_ext_vpd(&dev_entry->vpd);
1023
1024 ipr_err("-----New Device Information-----\n");
1025 ipr_log_ext_vpd(&dev_entry->new_vpd);
1026
1027 ipr_err("Cache Directory Card Information:\n");
1028 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1029
1030 ipr_err("Adapter Card Information:\n");
1031 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1032 }
1033 }
1034
1035 /**
1036 * ipr_log_config_error - Log a configuration error.
1037 * @ioa_cfg: ioa config struct
1038 * @hostrcb: hostrcb struct
1039 *
1040 * Return value:
1041 * none
1042 **/
1043 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1044 struct ipr_hostrcb *hostrcb)
1045 {
1046 int errors_logged, i;
1047 struct ipr_hostrcb_device_data_entry *dev_entry;
1048 struct ipr_hostrcb_type_03_error *error;
1049
1050 error = &hostrcb->hcam.u.error.u.type_03_error;
1051 errors_logged = be32_to_cpu(error->errors_logged);
1052
1053 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1054 be32_to_cpu(error->errors_detected), errors_logged);
1055
1056 dev_entry = error->dev;
1057
1058 for (i = 0; i < errors_logged; i++, dev_entry++) {
1059 ipr_err_separator;
1060
1061 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1062 ipr_log_vpd(&dev_entry->vpd);
1063
1064 ipr_err("-----New Device Information-----\n");
1065 ipr_log_vpd(&dev_entry->new_vpd);
1066
1067 ipr_err("Cache Directory Card Information:\n");
1068 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1069
1070 ipr_err("Adapter Card Information:\n");
1071 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1072
1073 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1074 be32_to_cpu(dev_entry->ioa_data[0]),
1075 be32_to_cpu(dev_entry->ioa_data[1]),
1076 be32_to_cpu(dev_entry->ioa_data[2]),
1077 be32_to_cpu(dev_entry->ioa_data[3]),
1078 be32_to_cpu(dev_entry->ioa_data[4]));
1079 }
1080 }
1081
1082 /**
1083 * ipr_log_enhanced_array_error - Log an array configuration error.
1084 * @ioa_cfg: ioa config struct
1085 * @hostrcb: hostrcb struct
1086 *
1087 * Return value:
1088 * none
1089 **/
1090 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1091 struct ipr_hostrcb *hostrcb)
1092 {
1093 int i, num_entries;
1094 struct ipr_hostrcb_type_14_error *error;
1095 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1096 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1097
1098 error = &hostrcb->hcam.u.error.u.type_14_error;
1099
1100 ipr_err_separator;
1101
1102 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1103 error->protection_level,
1104 ioa_cfg->host->host_no,
1105 error->last_func_vset_res_addr.bus,
1106 error->last_func_vset_res_addr.target,
1107 error->last_func_vset_res_addr.lun);
1108
1109 ipr_err_separator;
1110
1111 array_entry = error->array_member;
1112 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1113 sizeof(error->array_member));
1114
1115 for (i = 0; i < num_entries; i++, array_entry++) {
1116 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1117 continue;
1118
1119 if (be32_to_cpu(error->exposed_mode_adn) == i)
1120 ipr_err("Exposed Array Member %d:\n", i);
1121 else
1122 ipr_err("Array Member %d:\n", i);
1123
1124 ipr_log_ext_vpd(&array_entry->vpd);
1125 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1126 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1127 "Expected Location");
1128
1129 ipr_err_separator;
1130 }
1131 }
1132
1133 /**
1134 * ipr_log_array_error - Log an array configuration error.
1135 * @ioa_cfg: ioa config struct
1136 * @hostrcb: hostrcb struct
1137 *
1138 * Return value:
1139 * none
1140 **/
1141 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1142 struct ipr_hostrcb *hostrcb)
1143 {
1144 int i;
1145 struct ipr_hostrcb_type_04_error *error;
1146 struct ipr_hostrcb_array_data_entry *array_entry;
1147 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1148
1149 error = &hostrcb->hcam.u.error.u.type_04_error;
1150
1151 ipr_err_separator;
1152
1153 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1154 error->protection_level,
1155 ioa_cfg->host->host_no,
1156 error->last_func_vset_res_addr.bus,
1157 error->last_func_vset_res_addr.target,
1158 error->last_func_vset_res_addr.lun);
1159
1160 ipr_err_separator;
1161
1162 array_entry = error->array_member;
1163
1164 for (i = 0; i < 18; i++) {
1165 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1166 continue;
1167
1168 if (be32_to_cpu(error->exposed_mode_adn) == i)
1169 ipr_err("Exposed Array Member %d:\n", i);
1170 else
1171 ipr_err("Array Member %d:\n", i);
1172
1173 ipr_log_vpd(&array_entry->vpd);
1174
1175 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1176 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1177 "Expected Location");
1178
1179 ipr_err_separator;
1180
1181 if (i == 9)
1182 array_entry = error->array_member2;
1183 else
1184 array_entry++;
1185 }
1186 }
1187
1188 /**
1189 * ipr_log_hex_data - Log additional hex IOA error data.
1190 * @data: IOA error data
1191 * @len: data length
1192 *
1193 * Return value:
1194 * none
1195 **/
1196 static void ipr_log_hex_data(u32 *data, int len)
1197 {
1198 int i;
1199
1200 if (len == 0)
1201 return;
1202
1203 for (i = 0; i < len / 4; i += 4) {
1204 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1205 be32_to_cpu(data[i]),
1206 be32_to_cpu(data[i+1]),
1207 be32_to_cpu(data[i+2]),
1208 be32_to_cpu(data[i+3]));
1209 }
1210 }
1211
1212 /**
1213 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1214 * @ioa_cfg: ioa config struct
1215 * @hostrcb: hostrcb struct
1216 *
1217 * Return value:
1218 * none
1219 **/
1220 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1221 struct ipr_hostrcb *hostrcb)
1222 {
1223 struct ipr_hostrcb_type_17_error *error;
1224
1225 error = &hostrcb->hcam.u.error.u.type_17_error;
1226 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1227
1228 ipr_err("%s\n", error->failure_reason);
1229 ipr_err("Remote Adapter VPD:\n");
1230 ipr_log_ext_vpd(&error->vpd);
1231 ipr_log_hex_data(error->data,
1232 be32_to_cpu(hostrcb->hcam.length) -
1233 (offsetof(struct ipr_hostrcb_error, u) +
1234 offsetof(struct ipr_hostrcb_type_17_error, data)));
1235 }
1236
1237 /**
1238 * ipr_log_dual_ioa_error - Log a dual adapter error.
1239 * @ioa_cfg: ioa config struct
1240 * @hostrcb: hostrcb struct
1241 *
1242 * Return value:
1243 * none
1244 **/
1245 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1246 struct ipr_hostrcb *hostrcb)
1247 {
1248 struct ipr_hostrcb_type_07_error *error;
1249
1250 error = &hostrcb->hcam.u.error.u.type_07_error;
1251 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1252
1253 ipr_err("%s\n", error->failure_reason);
1254 ipr_err("Remote Adapter VPD:\n");
1255 ipr_log_vpd(&error->vpd);
1256 ipr_log_hex_data(error->data,
1257 be32_to_cpu(hostrcb->hcam.length) -
1258 (offsetof(struct ipr_hostrcb_error, u) +
1259 offsetof(struct ipr_hostrcb_type_07_error, data)));
1260 }
1261
1262 /**
1263 * ipr_log_generic_error - Log an adapter error.
1264 * @ioa_cfg: ioa config struct
1265 * @hostrcb: hostrcb struct
1266 *
1267 * Return value:
1268 * none
1269 **/
1270 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1271 struct ipr_hostrcb *hostrcb)
1272 {
1273 ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1274 be32_to_cpu(hostrcb->hcam.length));
1275 }
1276
1277 /**
1278  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1279 * @ioasc: IOASC
1280 *
1281  * This function will return the index into the ipr_error_table
1282 * for the specified IOASC. If the IOASC is not in the table,
1283 * 0 will be returned, which points to the entry used for unknown errors.
1284 *
1285 * Return value:
1286 * index into the ipr_error_table
1287 **/
1288 static u32 ipr_get_error(u32 ioasc)
1289 {
1290 int i;
1291
1292 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1293 if (ipr_error_table[i].ioasc == ioasc)
1294 return i;
1295
1296 return 0;
1297 }
1298
1299 /**
1300 * ipr_handle_log_data - Log an adapter error.
1301 * @ioa_cfg: ioa config struct
1302 * @hostrcb: hostrcb struct
1303 *
1304 * This function logs an adapter error to the system.
1305 *
1306 * Return value:
1307 * none
1308 **/
1309 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1310 struct ipr_hostrcb *hostrcb)
1311 {
1312 u32 ioasc;
1313 int error_index;
1314
1315 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1316 return;
1317
1318 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1319 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1320
1321 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1322
1323 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1324 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1325 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1326 scsi_report_bus_reset(ioa_cfg->host,
1327 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1328 }
1329
1330 error_index = ipr_get_error(ioasc);
1331
1332 if (!ipr_error_table[error_index].log_hcam)
1333 return;
1334
1335 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1336 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1337 "%s\n", ipr_error_table[error_index].error);
1338 } else {
1339 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1340 ipr_error_table[error_index].error);
1341 }
1342
1343 /* Set indication we have logged an error */
1344 ioa_cfg->errors_logged++;
1345
1346 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1347 return;
1348 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1349 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1350
1351 switch (hostrcb->hcam.overlay_id) {
1352 case IPR_HOST_RCB_OVERLAY_ID_2:
1353 ipr_log_cache_error(ioa_cfg, hostrcb);
1354 break;
1355 case IPR_HOST_RCB_OVERLAY_ID_3:
1356 ipr_log_config_error(ioa_cfg, hostrcb);
1357 break;
1358 case IPR_HOST_RCB_OVERLAY_ID_4:
1359 case IPR_HOST_RCB_OVERLAY_ID_6:
1360 ipr_log_array_error(ioa_cfg, hostrcb);
1361 break;
1362 case IPR_HOST_RCB_OVERLAY_ID_7:
1363 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1364 break;
1365 case IPR_HOST_RCB_OVERLAY_ID_12:
1366 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1367 break;
1368 case IPR_HOST_RCB_OVERLAY_ID_13:
1369 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1370 break;
1371 case IPR_HOST_RCB_OVERLAY_ID_14:
1372 case IPR_HOST_RCB_OVERLAY_ID_16:
1373 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1374 break;
1375 case IPR_HOST_RCB_OVERLAY_ID_17:
1376 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1377 break;
1378 case IPR_HOST_RCB_OVERLAY_ID_1:
1379 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1380 default:
1381 ipr_log_generic_error(ioa_cfg, hostrcb);
1382 break;
1383 }
1384 }
1385
1386 /**
1387 * ipr_process_error - Op done function for an adapter error log.
1388 * @ipr_cmd: ipr command struct
1389 *
1390 * This function is the op done function for an error log host
1391 * controlled async from the adapter. It will log the error and
1392 * send the HCAM back to the adapter.
1393 *
1394 * Return value:
1395 * none
1396 **/
1397 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1398 {
1399 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1400 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1401 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1402
1403 list_del(&hostrcb->queue);
1404 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1405
1406 if (!ioasc) {
1407 ipr_handle_log_data(ioa_cfg, hostrcb);
1408 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1409 dev_err(&ioa_cfg->pdev->dev,
1410 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1411 }
1412
1413 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1414 }
1415
1416 /**
1417 * ipr_timeout - An internally generated op has timed out.
1418 * @ipr_cmd: ipr command struct
1419 *
1420 * This function blocks host requests and initiates an
1421 * adapter reset.
1422 *
1423 * Return value:
1424 * none
1425 **/
1426 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1427 {
1428 unsigned long lock_flags = 0;
1429 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1430
1431 ENTER;
1432 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1433
1434 ioa_cfg->errors_logged++;
1435 dev_err(&ioa_cfg->pdev->dev,
1436 "Adapter being reset due to command timeout.\n");
1437
1438 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1439 ioa_cfg->sdt_state = GET_DUMP;
1440
1441 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1442 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1443
1444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1445 LEAVE;
1446 }
1447
1448 /**
1449 * ipr_oper_timeout - Adapter timed out transitioning to operational
1450 * @ipr_cmd: ipr command struct
1451 *
1452 * This function blocks host requests and initiates an
1453 * adapter reset.
1454 *
1455 * Return value:
1456 * none
1457 **/
1458 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1459 {
1460 unsigned long lock_flags = 0;
1461 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1462
1463 ENTER;
1464 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1465
1466 ioa_cfg->errors_logged++;
1467 dev_err(&ioa_cfg->pdev->dev,
1468 "Adapter timed out transitioning to operational.\n");
1469
1470 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1471 ioa_cfg->sdt_state = GET_DUMP;
1472
1473 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1474 if (ipr_fastfail)
1475 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1476 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1477 }
1478
1479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1480 LEAVE;
1481 }
1482
1483 /**
1484 * ipr_reset_reload - Reset/Reload the IOA
1485 * @ioa_cfg: ioa config struct
1486 * @shutdown_type: shutdown type
1487 *
1488 * This function resets the adapter and re-initializes it.
1489 * This function assumes that all new host commands have been stopped.
1490 * Return value:
1491 * SUCCESS / FAILED
1492 **/
1493 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1494 enum ipr_shutdown_type shutdown_type)
1495 {
1496 if (!ioa_cfg->in_reset_reload)
1497 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1498
1499 spin_unlock_irq(ioa_cfg->host->host_lock);
1500 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1501 spin_lock_irq(ioa_cfg->host->host_lock);
1502
1503         /* If we got hit with a host reset while we were already resetting
1504          the adapter for some reason and that reset failed, return FAILED. */
1505 if (ioa_cfg->ioa_is_dead) {
1506 ipr_trace;
1507 return FAILED;
1508 }
1509
1510 return SUCCESS;
1511 }
1512
1513 /**
1514 * ipr_find_ses_entry - Find matching SES in SES table
1515 * @res: resource entry struct of SES
1516 *
1517 * Return value:
1518 * pointer to SES table entry / NULL on failure
1519 **/
1520 static const struct ipr_ses_table_entry *
1521 ipr_find_ses_entry(struct ipr_resource_entry *res)
1522 {
1523 int i, j, matches;
1524 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1525
1526 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1527 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1528 if (ste->compare_product_id_byte[j] == 'X') {
1529 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1530 matches++;
1531 else
1532 break;
1533 } else
1534 matches++;
1535 }
1536
1537 if (matches == IPR_PROD_ID_LEN)
1538 return ste;
1539 }
1540
1541 return NULL;
1542 }
1543
1544 /**
1545 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1546 * @ioa_cfg: ioa config struct
1547 * @bus: SCSI bus
1548 * @bus_width: bus width
1549 *
1550 * Return value:
1551 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1552 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1553 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1554 * max 160MHz = max 320MB/sec).
1555 **/
1556 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1557 {
1558 struct ipr_resource_entry *res;
1559 const struct ipr_ses_table_entry *ste;
1560 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1561
1562 /* Loop through each config table entry in the config table buffer */
1563 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1564 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1565 continue;
1566
1567 if (bus != res->cfgte.res_addr.bus)
1568 continue;
1569
1570 if (!(ste = ipr_find_ses_entry(res)))
1571 continue;
1572
1573 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1574 }
1575
1576 return max_xfer_rate;
1577 }
1578
1579 /**
1580 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1581 * @ioa_cfg: ioa config struct
1582 * @max_delay: max delay in micro-seconds to wait
1583 *
1584 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1585 *
1586 * Return value:
1587 * 0 on success / other on failure
1588 **/
1589 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1590 {
1591 volatile u32 pcii_reg;
1592 int delay = 1;
1593
1594 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1595 while (delay < max_delay) {
1596 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1597
1598 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1599 return 0;
1600
1601 /* udelay cannot be used if delay is more than a few milliseconds */
1602 if ((delay / 1000) > MAX_UDELAY_MS)
1603 mdelay(delay / 1000);
1604 else
1605 udelay(delay);
1606
1607 delay += delay;
1608 }
1609 return -EIO;
1610 }
1611
1612 /**
1613 * ipr_get_ldump_data_section - Dump IOA memory
1614 * @ioa_cfg: ioa config struct
1615 * @start_addr: adapter address to dump
1616 * @dest: destination kernel buffer
1617 * @length_in_words: length to dump in 4 byte words
1618 *
1619 * Return value:
1620 * 0 on success / -EIO on failure
1621 **/
1622 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1623 u32 start_addr,
1624 __be32 *dest, u32 length_in_words)
1625 {
1626 volatile u32 temp_pcii_reg;
1627 int i, delay = 0;
1628
1629 /* Write IOA interrupt reg starting LDUMP state */
1630 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1631 ioa_cfg->regs.set_uproc_interrupt_reg);
1632
1633 /* Wait for IO debug acknowledge */
1634 if (ipr_wait_iodbg_ack(ioa_cfg,
1635 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1636 dev_err(&ioa_cfg->pdev->dev,
1637 "IOA dump long data transfer timeout\n");
1638 return -EIO;
1639 }
1640
1641 /* Signal LDUMP interlocked - clear IO debug ack */
1642 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1643 ioa_cfg->regs.clr_interrupt_reg);
1644
1645 /* Write Mailbox with starting address */
1646 writel(start_addr, ioa_cfg->ioa_mailbox);
1647
1648 /* Signal address valid - clear IOA Reset alert */
1649 writel(IPR_UPROCI_RESET_ALERT,
1650 ioa_cfg->regs.clr_uproc_interrupt_reg);
1651
1652 for (i = 0; i < length_in_words; i++) {
1653 /* Wait for IO debug acknowledge */
1654 if (ipr_wait_iodbg_ack(ioa_cfg,
1655 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1656 dev_err(&ioa_cfg->pdev->dev,
1657 "IOA dump short data transfer timeout\n");
1658 return -EIO;
1659 }
1660
1661 /* Read data from mailbox and increment destination pointer */
1662 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1663 dest++;
1664
1665 /* For all but the last word of data, signal data received */
1666 if (i < (length_in_words - 1)) {
1667 /* Signal dump data received - Clear IO debug Ack */
1668 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1669 ioa_cfg->regs.clr_interrupt_reg);
1670 }
1671 }
1672
1673 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1674 writel(IPR_UPROCI_RESET_ALERT,
1675 ioa_cfg->regs.set_uproc_interrupt_reg);
1676
1677 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1678 ioa_cfg->regs.clr_uproc_interrupt_reg);
1679
1680 /* Signal dump data received - Clear IO debug Ack */
1681 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1682 ioa_cfg->regs.clr_interrupt_reg);
1683
1684 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1685 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1686 temp_pcii_reg =
1687 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1688
1689 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1690 return 0;
1691
1692 udelay(10);
1693 delay += 10;
1694 }
1695
1696 return 0;
1697 }
1698
1699 #ifdef CONFIG_SCSI_IPR_DUMP
1700 /**
1701 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1702 * @ioa_cfg: ioa config struct
1703 * @pci_address: adapter address
1704 * @length: length of data to copy
1705 *
1706 * Copy data from PCI adapter to kernel buffer.
1707 * Note: length MUST be a 4 byte multiple
1708 * Return value:
1709 * 0 on success / other on failure
1710 **/
1711 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1712 unsigned long pci_address, u32 length)
1713 {
1714 int bytes_copied = 0;
1715 int cur_len, rc, rem_len, rem_page_len;
1716 __be32 *page;
1717 unsigned long lock_flags = 0;
1718 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1719
1720 while (bytes_copied < length &&
1721 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1722 if (ioa_dump->page_offset >= PAGE_SIZE ||
1723 ioa_dump->page_offset == 0) {
1724 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1725
1726 if (!page) {
1727 ipr_trace;
1728 return bytes_copied;
1729 }
1730
1731 ioa_dump->page_offset = 0;
1732 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1733 ioa_dump->next_page_index++;
1734 } else
1735 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1736
1737 rem_len = length - bytes_copied;
1738 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1739 cur_len = min(rem_len, rem_page_len);
1740
1741 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1742 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1743 rc = -EIO;
1744 } else {
1745 rc = ipr_get_ldump_data_section(ioa_cfg,
1746 pci_address + bytes_copied,
1747 &page[ioa_dump->page_offset / 4],
1748 (cur_len / sizeof(u32)));
1749 }
1750 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1751
1752 if (!rc) {
1753 ioa_dump->page_offset += cur_len;
1754 bytes_copied += cur_len;
1755 } else {
1756 ipr_trace;
1757 break;
1758 }
1759 schedule();
1760 }
1761
1762 return bytes_copied;
1763 }
1764
1765 /**
1766 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1767 * @hdr: dump entry header struct
1768 *
1769 * Return value:
1770 * nothing
1771 **/
1772 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1773 {
1774 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1775 hdr->num_elems = 1;
1776 hdr->offset = sizeof(*hdr);
1777 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1778 }
1779
1780 /**
1781 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1782 * @ioa_cfg: ioa config struct
1783 * @driver_dump: driver dump struct
1784 *
1785 * Return value:
1786 * nothing
1787 **/
1788 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1789 struct ipr_driver_dump *driver_dump)
1790 {
1791 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1792
1793 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1794 driver_dump->ioa_type_entry.hdr.len =
1795 sizeof(struct ipr_dump_ioa_type_entry) -
1796 sizeof(struct ipr_dump_entry_header);
1797 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1798 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1799 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1800 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1801 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1802 ucode_vpd->minor_release[1];
1803 driver_dump->hdr.num_entries++;
1804 }
1805
1806 /**
1807 * ipr_dump_version_data - Fill in the driver version in the dump.
1808 * @ioa_cfg: ioa config struct
1809 * @driver_dump: driver dump struct
1810 *
1811 * Return value:
1812 * nothing
1813 **/
1814 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1815 struct ipr_driver_dump *driver_dump)
1816 {
1817 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1818 driver_dump->version_entry.hdr.len =
1819 sizeof(struct ipr_dump_version_entry) -
1820 sizeof(struct ipr_dump_entry_header);
1821 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1822 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1823 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1824 driver_dump->hdr.num_entries++;
1825 }
1826
1827 /**
1828 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1829 * @ioa_cfg: ioa config struct
1830 * @driver_dump: driver dump struct
1831 *
1832 * Return value:
1833 * nothing
1834 **/
1835 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1836 struct ipr_driver_dump *driver_dump)
1837 {
1838 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1839 driver_dump->trace_entry.hdr.len =
1840 sizeof(struct ipr_dump_trace_entry) -
1841 sizeof(struct ipr_dump_entry_header);
1842 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1843 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1844 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1845 driver_dump->hdr.num_entries++;
1846 }
1847
1848 /**
1849 * ipr_dump_location_data - Fill in the IOA location in the dump.
1850 * @ioa_cfg: ioa config struct
1851 * @driver_dump: driver dump struct
1852 *
1853 * Return value:
1854 * nothing
1855 **/
1856 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1857 struct ipr_driver_dump *driver_dump)
1858 {
1859 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1860 driver_dump->location_entry.hdr.len =
1861 sizeof(struct ipr_dump_location_entry) -
1862 sizeof(struct ipr_dump_entry_header);
1863 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1864 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1865 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1866 driver_dump->hdr.num_entries++;
1867 }
1868
1869 /**
1870 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1871 * @ioa_cfg: ioa config struct
1872 * @dump: dump struct
1873 *
1874 * Return value:
1875 * nothing
1876 **/
1877 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1878 {
1879 unsigned long start_addr, sdt_word;
1880 unsigned long lock_flags = 0;
1881 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1882 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1883 u32 num_entries, start_off, end_off;
1884 u32 bytes_to_copy, bytes_copied, rc;
1885 struct ipr_sdt *sdt;
1886 int i;
1887
1888 ENTER;
1889
1890 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1891
1892 if (ioa_cfg->sdt_state != GET_DUMP) {
1893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1894 return;
1895 }
1896
1897 start_addr = readl(ioa_cfg->ioa_mailbox);
1898
1899 if (!ipr_sdt_is_fmt2(start_addr)) {
1900 dev_err(&ioa_cfg->pdev->dev,
1901 "Invalid dump table format: %lx\n", start_addr);
1902 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1903 return;
1904 }
1905
1906 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1907
1908 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1909
1910 /* Initialize the overall dump header */
1911 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1912 driver_dump->hdr.num_entries = 1;
1913 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1914 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1915 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1916 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1917
1918 ipr_dump_version_data(ioa_cfg, driver_dump);
1919 ipr_dump_location_data(ioa_cfg, driver_dump);
1920 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1921 ipr_dump_trace_data(ioa_cfg, driver_dump);
1922
1923 /* Update dump_header */
1924 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1925
1926 /* IOA Dump entry */
1927 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1928 ioa_dump->format = IPR_SDT_FMT2;
1929 ioa_dump->hdr.len = 0;
1930 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1931 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1932
1933 	/* The first entries in the sdt are actually a list of dump addresses and
1934 	   lengths used to gather the real dump data. sdt points to the
1935 	   IOA-generated dump table. Dump data will be extracted based on
1936 	   entries in this table. */
1937 sdt = &ioa_dump->sdt;
1938
1939 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1940 sizeof(struct ipr_sdt) / sizeof(__be32));
1941
1942 	/* Make sure the Smart Dump table is ready to use and the first entry is valid */
1943 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1944 dev_err(&ioa_cfg->pdev->dev,
1945 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1946 rc, be32_to_cpu(sdt->hdr.state));
1947 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1948 ioa_cfg->sdt_state = DUMP_OBTAINED;
1949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1950 return;
1951 }
1952
1953 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1954
1955 if (num_entries > IPR_NUM_SDT_ENTRIES)
1956 num_entries = IPR_NUM_SDT_ENTRIES;
1957
1958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1959
1960 for (i = 0; i < num_entries; i++) {
1961 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1962 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1963 break;
1964 }
1965
1966 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1967 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1968 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1969 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1970
1971 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1972 bytes_to_copy = end_off - start_off;
1973 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1974 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1975 continue;
1976 }
1977
1978 /* Copy data from adapter to driver buffers */
1979 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1980 bytes_to_copy);
1981
1982 ioa_dump->hdr.len += bytes_copied;
1983
1984 if (bytes_copied != bytes_to_copy) {
1985 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1986 break;
1987 }
1988 }
1989 }
1990 }
1991
1992 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1993
1994 /* Update dump_header */
1995 driver_dump->hdr.len += ioa_dump->hdr.len;
1996 wmb();
1997 ioa_cfg->sdt_state = DUMP_OBTAINED;
1998 LEAVE;
1999 }
2000
2001 #else
2002 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2003 #endif
2004
2005 /**
2006 * ipr_release_dump - Free adapter dump memory
2007 * @kref: kref struct
2008 *
2009 * Return value:
2010 * nothing
2011 **/
2012 static void ipr_release_dump(struct kref *kref)
2013 {
2014 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2015 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2016 unsigned long lock_flags = 0;
2017 int i;
2018
2019 ENTER;
2020 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2021 ioa_cfg->dump = NULL;
2022 ioa_cfg->sdt_state = INACTIVE;
2023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2024
2025 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2026 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2027
2028 kfree(dump);
2029 LEAVE;
2030 }
2031
2032 /**
2033 * ipr_worker_thread - Worker thread
2034 * @data: ioa config struct
2035 *
2036 * Called at task level from a work thread. This function takes care
2037  * of adding and removing devices from the mid-layer as configuration
2038 * changes are detected by the adapter.
2039 *
2040 * Return value:
2041 * nothing
2042 **/
2043 static void ipr_worker_thread(void *data)
2044 {
2045 unsigned long lock_flags;
2046 struct ipr_resource_entry *res;
2047 struct scsi_device *sdev;
2048 struct ipr_dump *dump;
2049 struct ipr_ioa_cfg *ioa_cfg = data;
2050 u8 bus, target, lun;
2051 int did_work;
2052
2053 ENTER;
2054 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2055
2056 if (ioa_cfg->sdt_state == GET_DUMP) {
2057 dump = ioa_cfg->dump;
2058 if (!dump) {
2059 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2060 return;
2061 }
2062 kref_get(&dump->kref);
2063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2064 ipr_get_ioa_dump(ioa_cfg, dump);
2065 kref_put(&dump->kref, ipr_release_dump);
2066
2067 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2068 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2069 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2070 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2071 return;
2072 }
2073
2074 restart:
2075 do {
2076 did_work = 0;
2077 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2078 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2079 return;
2080 }
2081
2082 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2083 if (res->del_from_ml && res->sdev) {
2084 did_work = 1;
2085 sdev = res->sdev;
2086 if (!scsi_device_get(sdev)) {
2087 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2088 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2089 scsi_remove_device(sdev);
2090 scsi_device_put(sdev);
2091 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2092 }
2093 break;
2094 }
2095 }
2096 } while(did_work);
2097
2098 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2099 if (res->add_to_ml) {
2100 bus = res->cfgte.res_addr.bus;
2101 target = res->cfgte.res_addr.target;
2102 lun = res->cfgte.res_addr.lun;
2103 res->add_to_ml = 0;
2104 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2105 scsi_add_device(ioa_cfg->host, bus, target, lun);
2106 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2107 goto restart;
2108 }
2109 }
2110
2111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2112 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2113 LEAVE;
2114 }
2115
2116 #ifdef CONFIG_SCSI_IPR_TRACE
2117 /**
2118 * ipr_read_trace - Dump the adapter trace
2119 * @kobj: kobject struct
2120 * @buf: buffer
2121 * @off: offset
2122 * @count: buffer size
2123 *
2124 * Return value:
2125 * number of bytes printed to buffer
2126 **/
2127 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2128 loff_t off, size_t count)
2129 {
2130 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2131 struct Scsi_Host *shost = class_to_shost(cdev);
2132 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2133 unsigned long lock_flags = 0;
2134 int size = IPR_TRACE_SIZE;
2135 char *src = (char *)ioa_cfg->trace;
2136
2137 if (off > size)
2138 return 0;
2139 if (off + count > size) {
2140 size -= off;
2141 count = size;
2142 }
2143
2144 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2145 memcpy(buf, &src[off], count);
2146 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2147 return count;
2148 }
2149
2150 static struct bin_attribute ipr_trace_attr = {
2151 .attr = {
2152 .name = "trace",
2153 .mode = S_IRUGO,
2154 },
2155 .size = 0,
2156 .read = ipr_read_trace,
2157 };
2158 #endif
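/*
 * Illustrative sketch (not part of the original source): a minimal
 * user-space reader for the binary "trace" attribute defined above.
 * The sysfs path is an assumption for the example; hostN varies by
 * system.
 */
#if 0	/* example only -- never built with the driver */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* raw adapter trace data */
	close(fd);
	return 0;
}
#endif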
2159
2160 static const struct {
2161 enum ipr_cache_state state;
2162 char *name;
2163 } cache_state [] = {
2164 { CACHE_NONE, "none" },
2165 { CACHE_DISABLED, "disabled" },
2166 { CACHE_ENABLED, "enabled" }
2167 };
2168
2169 /**
2170 * ipr_show_write_caching - Show the write caching attribute
2171 * @class_dev: class device struct
2172 * @buf: buffer
2173 *
2174 * Return value:
2175 * number of bytes printed to buffer
2176 **/
2177 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2178 {
2179 struct Scsi_Host *shost = class_to_shost(class_dev);
2180 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2181 unsigned long lock_flags = 0;
2182 int i, len = 0;
2183
2184 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2185 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2186 if (cache_state[i].state == ioa_cfg->cache_state) {
2187 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2188 break;
2189 }
2190 }
2191 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2192 return len;
2193 }
2194
2195
2196 /**
2197 * ipr_store_write_caching - Enable/disable adapter write cache
2198 * @class_dev: class_device struct
2199 * @buf: buffer
2200 * @count: buffer size
2201 *
2202 * This function will enable/disable adapter write cache.
2203 *
2204 * Return value:
2205 * count on success / other on failure
2206 **/
2207 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2208 const char *buf, size_t count)
2209 {
2210 struct Scsi_Host *shost = class_to_shost(class_dev);
2211 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2212 unsigned long lock_flags = 0;
2213 enum ipr_cache_state new_state = CACHE_INVALID;
2214 int i;
2215
2216 if (!capable(CAP_SYS_ADMIN))
2217 return -EACCES;
2218 if (ioa_cfg->cache_state == CACHE_NONE)
2219 return -EINVAL;
2220
2221 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2222 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2223 new_state = cache_state[i].state;
2224 break;
2225 }
2226 }
2227
2228 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2229 return -EINVAL;
2230
2231 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2232 if (ioa_cfg->cache_state == new_state) {
2233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2234 return count;
2235 }
2236
2237 ioa_cfg->cache_state = new_state;
2238 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2239 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2240 if (!ioa_cfg->in_reset_reload)
2241 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2243 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2244
2245 return count;
2246 }
2247
2248 static struct class_device_attribute ipr_ioa_cache_attr = {
2249 .attr = {
2250 .name = "write_cache",
2251 .mode = S_IRUGO | S_IWUSR,
2252 },
2253 .show = ipr_show_write_caching,
2254 .store = ipr_store_write_caching
2255 };
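/*
 * Illustrative sketch (not part of the original source): toggling the
 * adapter write cache from user space through the "write_cache"
 * attribute defined above.  The sysfs path is an assumption for the
 * example; the store method blocks until the resulting adapter reset
 * completes.
 */
#if 0	/* example only -- never built with the driver */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/scsi_host/host0/write_cache", O_WRONLY);

	if (fd < 0)
		return 1;
	/* accepted values match the cache_state table: "enabled"/"disabled" */
	if (write(fd, "disabled", strlen("disabled")) < 0)
		perror("write_cache");
	close(fd);
	return 0;
}
#endif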
2256
2257 /**
2258 * ipr_show_fw_version - Show the firmware version
2259 * @class_dev: class device struct
2260 * @buf: buffer
2261 *
2262 * Return value:
2263 * number of bytes printed to buffer
2264 **/
2265 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2266 {
2267 struct Scsi_Host *shost = class_to_shost(class_dev);
2268 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2269 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2270 unsigned long lock_flags = 0;
2271 int len;
2272
2273 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2274 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2275 ucode_vpd->major_release, ucode_vpd->card_type,
2276 ucode_vpd->minor_release[0],
2277 ucode_vpd->minor_release[1]);
2278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2279 return len;
2280 }
2281
2282 static struct class_device_attribute ipr_fw_version_attr = {
2283 .attr = {
2284 .name = "fw_version",
2285 .mode = S_IRUGO,
2286 },
2287 .show = ipr_show_fw_version,
2288 };
2289
2290 /**
2291 * ipr_show_log_level - Show the adapter's error logging level
2292 * @class_dev: class device struct
2293 * @buf: buffer
2294 *
2295 * Return value:
2296 * number of bytes printed to buffer
2297 **/
2298 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2299 {
2300 struct Scsi_Host *shost = class_to_shost(class_dev);
2301 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2302 unsigned long lock_flags = 0;
2303 int len;
2304
2305 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2306 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2308 return len;
2309 }
2310
2311 /**
2312 * ipr_store_log_level - Change the adapter's error logging level
2313 * @class_dev: class device struct
2314 * @buf: buffer
2315 *
2316 * Return value:
2317  * 	number of bytes consumed from the buffer
2318 **/
2319 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2320 const char *buf, size_t count)
2321 {
2322 struct Scsi_Host *shost = class_to_shost(class_dev);
2323 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2324 unsigned long lock_flags = 0;
2325
2326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2327 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2329 return strlen(buf);
2330 }
2331
2332 static struct class_device_attribute ipr_log_level_attr = {
2333 .attr = {
2334 .name = "log_level",
2335 .mode = S_IRUGO | S_IWUSR,
2336 },
2337 .show = ipr_show_log_level,
2338 .store = ipr_store_log_level
2339 };
2340
2341 /**
2342 * ipr_store_diagnostics - IOA Diagnostics interface
2343 * @class_dev: class_device struct
2344 * @buf: buffer
2345 * @count: buffer size
2346 *
2347 * This function will reset the adapter and wait a reasonable
2348 * amount of time for any errors that the adapter might log.
2349 *
2350 * Return value:
2351 * count on success / other on failure
2352 **/
2353 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2354 const char *buf, size_t count)
2355 {
2356 struct Scsi_Host *shost = class_to_shost(class_dev);
2357 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2358 unsigned long lock_flags = 0;
2359 int rc = count;
2360
2361 if (!capable(CAP_SYS_ADMIN))
2362 return -EACCES;
2363
2364 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2365 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2366 ioa_cfg->errors_logged = 0;
2367 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2368
2369 if (ioa_cfg->in_reset_reload) {
2370 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2371 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2372
2373 /* Wait for a second for any errors to be logged */
2374 msleep(1000);
2375 } else {
2376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2377 return -EIO;
2378 }
2379
2380 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2381 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2382 rc = -EIO;
2383 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2384
2385 return rc;
2386 }
2387
2388 static struct class_device_attribute ipr_diagnostics_attr = {
2389 .attr = {
2390 .name = "run_diagnostics",
2391 .mode = S_IWUSR,
2392 },
2393 .store = ipr_store_diagnostics
2394 };
2395
2396 /**
2397 * ipr_show_adapter_state - Show the adapter's state
2398 * @class_dev: class device struct
2399 * @buf: buffer
2400 *
2401 * Return value:
2402 * number of bytes printed to buffer
2403 **/
2404 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2405 {
2406 struct Scsi_Host *shost = class_to_shost(class_dev);
2407 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2408 unsigned long lock_flags = 0;
2409 int len;
2410
2411 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2412 if (ioa_cfg->ioa_is_dead)
2413 len = snprintf(buf, PAGE_SIZE, "offline\n");
2414 else
2415 len = snprintf(buf, PAGE_SIZE, "online\n");
2416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2417 return len;
2418 }
2419
2420 /**
2421 * ipr_store_adapter_state - Change adapter state
2422 * @class_dev: class_device struct
2423 * @buf: buffer
2424 * @count: buffer size
2425 *
2426 * This function will change the adapter's state.
2427 *
2428 * Return value:
2429 * count on success / other on failure
2430 **/
2431 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2432 const char *buf, size_t count)
2433 {
2434 struct Scsi_Host *shost = class_to_shost(class_dev);
2435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2436 unsigned long lock_flags;
2437 int result = count;
2438
2439 if (!capable(CAP_SYS_ADMIN))
2440 return -EACCES;
2441
2442 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2443 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2444 ioa_cfg->ioa_is_dead = 0;
2445 ioa_cfg->reset_retries = 0;
2446 ioa_cfg->in_ioa_bringdown = 0;
2447 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2448 }
2449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2450 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2451
2452 return result;
2453 }
2454
2455 static struct class_device_attribute ipr_ioa_state_attr = {
2456 .attr = {
2457 .name = "state",
2458 .mode = S_IRUGO | S_IWUSR,
2459 },
2460 .show = ipr_show_adapter_state,
2461 .store = ipr_store_adapter_state
2462 };
2463
2464 /**
2465 * ipr_store_reset_adapter - Reset the adapter
2466 * @class_dev: class_device struct
2467 * @buf: buffer
2468 * @count: buffer size
2469 *
2470 * This function will reset the adapter.
2471 *
2472 * Return value:
2473 * count on success / other on failure
2474 **/
2475 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2476 const char *buf, size_t count)
2477 {
2478 struct Scsi_Host *shost = class_to_shost(class_dev);
2479 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2480 unsigned long lock_flags;
2481 int result = count;
2482
2483 if (!capable(CAP_SYS_ADMIN))
2484 return -EACCES;
2485
2486 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2487 if (!ioa_cfg->in_reset_reload)
2488 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2490 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2491
2492 return result;
2493 }
2494
2495 static struct class_device_attribute ipr_ioa_reset_attr = {
2496 .attr = {
2497 .name = "reset_host",
2498 .mode = S_IWUSR,
2499 },
2500 .store = ipr_store_reset_adapter
2501 };
2502
2503 /**
2504 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2505 * @buf_len: buffer length
2506 *
2507 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2508 * list to use for microcode download
2509 *
2510 * Return value:
2511 * pointer to sglist / NULL on failure
2512 **/
2513 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2514 {
2515 int sg_size, order, bsize_elem, num_elem, i, j;
2516 struct ipr_sglist *sglist;
2517 struct scatterlist *scatterlist;
2518 struct page *page;
2519
2520 /* Get the minimum size per scatter/gather element */
2521 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2522
2523 /* Get the actual size per element */
2524 order = get_order(sg_size);
2525
2526 /* Determine the actual number of bytes per element */
2527 bsize_elem = PAGE_SIZE * (1 << order);
2528
2529 /* Determine the actual number of sg entries needed */
2530 if (buf_len % bsize_elem)
2531 num_elem = (buf_len / bsize_elem) + 1;
2532 else
2533 num_elem = buf_len / bsize_elem;
2534
2535 /* Allocate a scatter/gather list for the DMA */
2536 sglist = kzalloc(sizeof(struct ipr_sglist) +
2537 (sizeof(struct scatterlist) * (num_elem - 1)),
2538 GFP_KERNEL);
2539
2540 if (sglist == NULL) {
2541 ipr_trace;
2542 return NULL;
2543 }
2544
2545 scatterlist = sglist->scatterlist;
2546
2547 sglist->order = order;
2548 sglist->num_sg = num_elem;
2549
2550 /* Allocate a bunch of sg elements */
2551 for (i = 0; i < num_elem; i++) {
2552 page = alloc_pages(GFP_KERNEL, order);
2553 if (!page) {
2554 ipr_trace;
2555
2556 /* Free up what we already allocated */
2557 for (j = i - 1; j >= 0; j--)
2558 __free_pages(scatterlist[j].page, order);
2559 kfree(sglist);
2560 return NULL;
2561 }
2562
2563 scatterlist[i].page = page;
2564 }
2565
2566 return sglist;
2567 }
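/*
 * Illustrative worked example (not part of the original source), assuming
 * PAGE_SIZE = 4096 and IPR_MAX_SGLIST = 64 for the arithmetic above.  For
 * a 1 MB microcode image (buf_len = 1048576):
 *
 *	sg_size    = 1048576 / 63 = 16644	minimum bytes per element
 *	order      = get_order(16644) = 3	next power-of-two page count
 *	bsize_elem = 4096 << 3 = 32768		actual bytes per element
 *	num_elem   = 1048576 / 32768 = 32	scatter/gather entries needed
 */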
2568
2569 /**
2570 * ipr_free_ucode_buffer - Frees a microcode download buffer
2571  * @sglist: scatter/gather list pointer
2572 *
2573 * Free a DMA'able ucode download buffer previously allocated with
2574 * ipr_alloc_ucode_buffer
2575 *
2576 * Return value:
2577 * nothing
2578 **/
2579 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2580 {
2581 int i;
2582
2583 for (i = 0; i < sglist->num_sg; i++)
2584 __free_pages(sglist->scatterlist[i].page, sglist->order);
2585
2586 kfree(sglist);
2587 }
2588
2589 /**
2590  * ipr_copy_ucode_buffer - Copy the microcode image to the download buffer
2591 * @sglist: scatter/gather list pointer
2592 * @buffer: buffer pointer
2593 * @len: buffer length
2594 *
2595  * Copy a microcode image from a kernel buffer into a buffer allocated by
2596 * ipr_alloc_ucode_buffer
2597 *
2598 * Return value:
2599 * 0 on success / other on failure
2600 **/
2601 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2602 u8 *buffer, u32 len)
2603 {
2604 int bsize_elem, i, result = 0;
2605 struct scatterlist *scatterlist;
2606 void *kaddr;
2607
2608 /* Determine the actual number of bytes per element */
2609 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2610
2611 scatterlist = sglist->scatterlist;
2612
2613 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2614 kaddr = kmap(scatterlist[i].page);
2615 memcpy(kaddr, buffer, bsize_elem);
2616 kunmap(scatterlist[i].page);
2617
2618 scatterlist[i].length = bsize_elem;
2619
2620 if (result != 0) {
2621 ipr_trace;
2622 return result;
2623 }
2624 }
2625
2626 if (len % bsize_elem) {
2627 kaddr = kmap(scatterlist[i].page);
2628 memcpy(kaddr, buffer, len % bsize_elem);
2629 kunmap(scatterlist[i].page);
2630
2631 scatterlist[i].length = len % bsize_elem;
2632 }
2633
2634 sglist->buffer_len = len;
2635 return result;
2636 }
2637
2638 /**
2639 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2640 * @ipr_cmd: ipr command struct
2641 * @sglist: scatter/gather list
2642 *
2643 * Builds a microcode download IOA data list (IOADL).
2644 *
2645 **/
2646 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2647 struct ipr_sglist *sglist)
2648 {
2649 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2650 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2651 struct scatterlist *scatterlist = sglist->scatterlist;
2652 int i;
2653
2654 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2655 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2656 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2657 ioarcb->write_ioadl_len =
2658 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2659
2660 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2661 ioadl[i].flags_and_data_len =
2662 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2663 ioadl[i].address =
2664 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2665 }
2666
2667 ioadl[i-1].flags_and_data_len |=
2668 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2669 }
2670
2671 /**
2672 * ipr_update_ioa_ucode - Update IOA's microcode
2673 * @ioa_cfg: ioa config struct
2674 * @sglist: scatter/gather list
2675 *
2676 * Initiate an adapter reset to update the IOA's microcode
2677 *
2678 * Return value:
2679 * 0 on success / -EIO on failure
2680 **/
2681 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2682 struct ipr_sglist *sglist)
2683 {
2684 unsigned long lock_flags;
2685
2686 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687
2688 if (ioa_cfg->ucode_sglist) {
2689 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2690 dev_err(&ioa_cfg->pdev->dev,
2691 "Microcode download already in progress\n");
2692 return -EIO;
2693 }
2694
2695 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2696 sglist->num_sg, DMA_TO_DEVICE);
2697
2698 if (!sglist->num_dma_sg) {
2699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2700 dev_err(&ioa_cfg->pdev->dev,
2701 "Failed to map microcode download buffer!\n");
2702 return -EIO;
2703 }
2704
2705 ioa_cfg->ucode_sglist = sglist;
2706 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2708 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2709
2710 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2711 ioa_cfg->ucode_sglist = NULL;
2712 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713 return 0;
2714 }
2715
2716 /**
2717 * ipr_store_update_fw - Update the firmware on the adapter
2718 * @class_dev: class_device struct
2719 * @buf: buffer
2720 * @count: buffer size
2721 *
2722 * This function will update the firmware on the adapter.
2723 *
2724 * Return value:
2725 * count on success / other on failure
2726 **/
2727 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2728 const char *buf, size_t count)
2729 {
2730 struct Scsi_Host *shost = class_to_shost(class_dev);
2731 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2732 struct ipr_ucode_image_header *image_hdr;
2733 const struct firmware *fw_entry;
2734 struct ipr_sglist *sglist;
2735 char fname[100];
2736 char *src;
2737 int len, result, dnld_size;
2738
2739 if (!capable(CAP_SYS_ADMIN))
2740 return -EACCES;
2741
2742 len = snprintf(fname, 99, "%s", buf);
2743 fname[len-1] = '\0';
2744
2745 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2746 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2747 return -EIO;
2748 }
2749
2750 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2751
2752 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2753 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2754 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2755 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2756 release_firmware(fw_entry);
2757 return -EINVAL;
2758 }
2759
2760 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2761 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2762 sglist = ipr_alloc_ucode_buffer(dnld_size);
2763
2764 if (!sglist) {
2765 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2766 release_firmware(fw_entry);
2767 return -ENOMEM;
2768 }
2769
2770 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2771
2772 if (result) {
2773 dev_err(&ioa_cfg->pdev->dev,
2774 "Microcode buffer copy to DMA buffer failed\n");
2775 goto out;
2776 }
2777
2778 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2779
2780 if (!result)
2781 result = count;
2782 out:
2783 ipr_free_ucode_buffer(sglist);
2784 release_firmware(fw_entry);
2785 return result;
2786 }
2787
2788 static struct class_device_attribute ipr_update_fw_attr = {
2789 .attr = {
2790 .name = "update_fw",
2791 .mode = S_IWUSR,
2792 },
2793 .store = ipr_store_update_fw
2794 };
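/*
 * Illustrative sketch (not part of the original source): requesting a
 * microcode update from user space.  The image name below is hypothetical;
 * it must be a file that request_firmware() can locate on the system.  The
 * write blocks until the adapter reset that performs the download finishes.
 */
#if 0	/* example only -- never built with the driver */
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *fw = "ibm-adapter-ucode.bin\n";	/* hypothetical name */
	int fd = open("/sys/class/scsi_host/host0/update_fw", O_WRONLY);

	if (fd < 0)
		return 1;
	/* the trailing newline is stripped by ipr_store_update_fw() */
	if (write(fd, fw, strlen(fw)) < 0)
		return 1;
	close(fd);
	return 0;
}
#endif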
2795
2796 static struct class_device_attribute *ipr_ioa_attrs[] = {
2797 &ipr_fw_version_attr,
2798 &ipr_log_level_attr,
2799 &ipr_diagnostics_attr,
2800 &ipr_ioa_state_attr,
2801 &ipr_ioa_reset_attr,
2802 &ipr_update_fw_attr,
2803 &ipr_ioa_cache_attr,
2804 NULL,
2805 };
2806
2807 #ifdef CONFIG_SCSI_IPR_DUMP
2808 /**
2809 * ipr_read_dump - Dump the adapter
2810 * @kobj: kobject struct
2811 * @buf: buffer
2812 * @off: offset
2813 * @count: buffer size
2814 *
2815 * Return value:
2816 * number of bytes printed to buffer
2817 **/
2818 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2819 loff_t off, size_t count)
2820 {
2821 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2822 struct Scsi_Host *shost = class_to_shost(cdev);
2823 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2824 struct ipr_dump *dump;
2825 unsigned long lock_flags = 0;
2826 char *src;
2827 int len;
2828 size_t rc = count;
2829
2830 if (!capable(CAP_SYS_ADMIN))
2831 return -EACCES;
2832
2833 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2834 dump = ioa_cfg->dump;
2835
2836 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2837 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2838 return 0;
2839 }
2840 kref_get(&dump->kref);
2841 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2842
2843 if (off > dump->driver_dump.hdr.len) {
2844 kref_put(&dump->kref, ipr_release_dump);
2845 return 0;
2846 }
2847
2848 if (off + count > dump->driver_dump.hdr.len) {
2849 count = dump->driver_dump.hdr.len - off;
2850 rc = count;
2851 }
2852
2853 if (count && off < sizeof(dump->driver_dump)) {
2854 if (off + count > sizeof(dump->driver_dump))
2855 len = sizeof(dump->driver_dump) - off;
2856 else
2857 len = count;
2858 src = (u8 *)&dump->driver_dump + off;
2859 memcpy(buf, src, len);
2860 buf += len;
2861 off += len;
2862 count -= len;
2863 }
2864
2865 off -= sizeof(dump->driver_dump);
2866
2867 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2868 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2869 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2870 else
2871 len = count;
2872 src = (u8 *)&dump->ioa_dump + off;
2873 memcpy(buf, src, len);
2874 buf += len;
2875 off += len;
2876 count -= len;
2877 }
2878
2879 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2880
2881 while (count) {
2882 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2883 len = PAGE_ALIGN(off) - off;
2884 else
2885 len = count;
2886 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2887 src += off & ~PAGE_MASK;
2888 memcpy(buf, src, len);
2889 buf += len;
2890 off += len;
2891 count -= len;
2892 }
2893
2894 kref_put(&dump->kref, ipr_release_dump);
2895 return rc;
2896 }
2897
2898 /**
2899 * ipr_alloc_dump - Prepare for adapter dump
2900 * @ioa_cfg: ioa config struct
2901 *
2902 * Return value:
2903 * 0 on success / other on failure
2904 **/
2905 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2906 {
2907 struct ipr_dump *dump;
2908 unsigned long lock_flags = 0;
2909
2910 ENTER;
2911 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2912
2913 if (!dump) {
2914 ipr_err("Dump memory allocation failed\n");
2915 return -ENOMEM;
2916 }
2917
2918 kref_init(&dump->kref);
2919 dump->ioa_cfg = ioa_cfg;
2920
2921 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2922
2923 if (INACTIVE != ioa_cfg->sdt_state) {
2924 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2925 kfree(dump);
2926 return 0;
2927 }
2928
2929 ioa_cfg->dump = dump;
2930 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2931 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2932 ioa_cfg->dump_taken = 1;
2933 schedule_work(&ioa_cfg->work_q);
2934 }
2935 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2936
2937 LEAVE;
2938 return 0;
2939 }
2940
2941 /**
2942 * ipr_free_dump - Free adapter dump memory
2943 * @ioa_cfg: ioa config struct
2944 *
2945 * Return value:
2946 * 0 on success / other on failure
2947 **/
2948 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2949 {
2950 struct ipr_dump *dump;
2951 unsigned long lock_flags = 0;
2952
2953 ENTER;
2954
2955 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2956 dump = ioa_cfg->dump;
2957 if (!dump) {
2958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2959 return 0;
2960 }
2961
2962 ioa_cfg->dump = NULL;
2963 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964
2965 kref_put(&dump->kref, ipr_release_dump);
2966
2967 LEAVE;
2968 return 0;
2969 }
2970
2971 /**
2972 * ipr_write_dump - Setup dump state of adapter
2973 * @kobj: kobject struct
2974 * @buf: buffer
2975 * @off: offset
2976 * @count: buffer size
2977 *
2978 * Return value:
2979  * 	count on success / other on failure
2980 **/
2981 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2982 loff_t off, size_t count)
2983 {
2984 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2985 struct Scsi_Host *shost = class_to_shost(cdev);
2986 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2987 int rc;
2988
2989 if (!capable(CAP_SYS_ADMIN))
2990 return -EACCES;
2991
2992 if (buf[0] == '1')
2993 rc = ipr_alloc_dump(ioa_cfg);
2994 else if (buf[0] == '0')
2995 rc = ipr_free_dump(ioa_cfg);
2996 else
2997 return -EINVAL;
2998
2999 if (rc)
3000 return rc;
3001 else
3002 return count;
3003 }
3004
3005 static struct bin_attribute ipr_dump_attr = {
3006 .attr = {
3007 .name = "dump",
3008 .mode = S_IRUSR | S_IWUSR,
3009 },
3010 .size = 0,
3011 .read = ipr_read_dump,
3012 .write = ipr_write_dump
3013 };
3014 #else
3015 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3016 #endif
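/*
 * Illustrative sketch (not part of the original source): driving the
 * "dump" attribute from user space.  Writing '1' prepares dump memory,
 * reading returns the driver and IOA dump once it has been obtained, and
 * writing '0' frees it.  The sysfs path and output file are assumptions
 * for the example.
 */
#if 0	/* example only -- never built with the driver */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/scsi_host/host0/dump", O_RDWR);
	FILE *out = fopen("/tmp/ipr_dump.bin", "w");

	if (fd < 0 || !out)
		return 1;
	write(fd, "1", 1);		/* allocate dump memory */
	/* ... wait here until the adapter has failed and been dumped ... */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, out);	/* save driver + IOA dump */
	write(fd, "0", 1);		/* free dump memory */
	fclose(out);
	close(fd);
	return 0;
}
#endif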
3017
3018 /**
3019 * ipr_change_queue_depth - Change the device's queue depth
3020 * @sdev: scsi device struct
3021 * @qdepth: depth to set
3022 *
3023 * Return value:
3024 * actual depth set
3025 **/
3026 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3027 {
3028 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3029 return sdev->queue_depth;
3030 }
3031
3032 /**
3033 * ipr_change_queue_type - Change the device's queue type
3034  * @sdev: scsi device struct
3035 * @tag_type: type of tags to use
3036 *
3037 * Return value:
3038 * actual queue type set
3039 **/
3040 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3041 {
3042 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3043 struct ipr_resource_entry *res;
3044 unsigned long lock_flags = 0;
3045
3046 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3047 res = (struct ipr_resource_entry *)sdev->hostdata;
3048
3049 if (res) {
3050 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3051 /*
3052 * We don't bother quiescing the device here since the
3053 * adapter firmware does it for us.
3054 */
3055 scsi_set_tag_type(sdev, tag_type);
3056
3057 if (tag_type)
3058 scsi_activate_tcq(sdev, sdev->queue_depth);
3059 else
3060 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3061 } else
3062 tag_type = 0;
3063 } else
3064 tag_type = 0;
3065
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067 return tag_type;
3068 }
3069
3070 /**
3071 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3072 * @dev: device struct
3073 * @buf: buffer
3074 *
3075 * Return value:
3076 * number of bytes printed to buffer
3077 **/
3078 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3079 {
3080 struct scsi_device *sdev = to_scsi_device(dev);
3081 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3082 struct ipr_resource_entry *res;
3083 unsigned long lock_flags = 0;
3084 ssize_t len = -ENXIO;
3085
3086 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3087 res = (struct ipr_resource_entry *)sdev->hostdata;
3088 if (res)
3089 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3090 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091 return len;
3092 }
3093
3094 static struct device_attribute ipr_adapter_handle_attr = {
3095 .attr = {
3096 .name = "adapter_handle",
3097 .mode = S_IRUSR,
3098 },
3099 .show = ipr_show_adapter_handle
3100 };
3101
3102 static struct device_attribute *ipr_dev_attrs[] = {
3103 &ipr_adapter_handle_attr,
3104 NULL,
3105 };
3106
3107 /**
3108 * ipr_biosparam - Return the HSC mapping
3109 * @sdev: scsi device struct
3110 * @block_device: block device pointer
3111 * @capacity: capacity of the device
3112 * @parm: Array containing returned HSC values.
3113 *
3114 * This function generates the HSC parms that fdisk uses.
3115 * We want to make sure we return something that places partitions
3116 * on 4k boundaries for best performance with the IOA.
3117 *
3118 * Return value:
3119 * 0 on success
3120 **/
3121 static int ipr_biosparam(struct scsi_device *sdev,
3122 struct block_device *block_device,
3123 sector_t capacity, int *parm)
3124 {
3125 int heads, sectors;
3126 sector_t cylinders;
3127
3128 heads = 128;
3129 sectors = 32;
3130
3131 cylinders = capacity;
3132 sector_div(cylinders, (128 * 32));
3133
3134 /* return result */
3135 parm[0] = heads;
3136 parm[1] = sectors;
3137 parm[2] = cylinders;
3138
3139 return 0;
3140 }
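/*
 * Illustrative worked example (not part of the original source): with
 * 128 heads and 32 sectors/track, one cylinder is 128 * 32 = 4096 sectors
 * (2 MB with 512-byte sectors), so cylinder-aligned partitions always
 * start on a 4k boundary.  For a hypothetical disk of 70,000,000 sectors:
 *
 *	cylinders = 70000000 / 4096 = 17089	(truncated by sector_div)
 */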
3141
3142 /**
3143 * ipr_slave_destroy - Unconfigure a SCSI device
3144 * @sdev: scsi device struct
3145 *
3146 * Return value:
3147 * nothing
3148 **/
3149 static void ipr_slave_destroy(struct scsi_device *sdev)
3150 {
3151 struct ipr_resource_entry *res;
3152 struct ipr_ioa_cfg *ioa_cfg;
3153 unsigned long lock_flags = 0;
3154
3155 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3156
3157 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3158 res = (struct ipr_resource_entry *) sdev->hostdata;
3159 if (res) {
3160 sdev->hostdata = NULL;
3161 res->sdev = NULL;
3162 }
3163 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3164 }
3165
3166 /**
3167 * ipr_slave_configure - Configure a SCSI device
3168 * @sdev: scsi device struct
3169 *
3170 * This function configures the specified scsi device.
3171 *
3172 * Return value:
3173 * 0 on success
3174 **/
3175 static int ipr_slave_configure(struct scsi_device *sdev)
3176 {
3177 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3178 struct ipr_resource_entry *res;
3179 unsigned long lock_flags = 0;
3180
3181 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3182 res = sdev->hostdata;
3183 if (res) {
3184 if (ipr_is_af_dasd_device(res))
3185 sdev->type = TYPE_RAID;
3186 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3187 sdev->scsi_level = 4;
3188 sdev->no_uld_attach = 1;
3189 }
3190 if (ipr_is_vset_device(res)) {
3191 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3192 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3193 }
3194 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3195 sdev->allow_restart = 1;
3196 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3197 }
3198 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3199 return 0;
3200 }
3201
3202 /**
3203 * ipr_slave_alloc - Prepare for commands to a device.
3204 * @sdev: scsi device struct
3205 *
3206 * This function saves a pointer to the resource entry
3207 * in the scsi device struct if the device exists. We
3208 * can then use this pointer in ipr_queuecommand when
3209 * handling new commands.
3210 *
3211 * Return value:
3212 * 0 on success / -ENXIO if device does not exist
3213 **/
3214 static int ipr_slave_alloc(struct scsi_device *sdev)
3215 {
3216 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3217 struct ipr_resource_entry *res;
3218 unsigned long lock_flags;
3219 int rc = -ENXIO;
3220
3221 sdev->hostdata = NULL;
3222
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224
3225 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3226 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3227 (res->cfgte.res_addr.target == sdev->id) &&
3228 (res->cfgte.res_addr.lun == sdev->lun)) {
3229 res->sdev = sdev;
3230 res->add_to_ml = 0;
3231 res->in_erp = 0;
3232 sdev->hostdata = res;
3233 if (!ipr_is_naca_model(res))
3234 res->needs_sync_complete = 1;
3235 rc = 0;
3236 break;
3237 }
3238 }
3239
3240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241
3242 return rc;
3243 }
3244
3245 /**
3246 * ipr_eh_host_reset - Reset the host adapter
3247 * @scsi_cmd: scsi command struct
3248 *
3249 * Return value:
3250 * SUCCESS / FAILED
3251 **/
3252 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3253 {
3254 struct ipr_ioa_cfg *ioa_cfg;
3255 int rc;
3256
3257 ENTER;
3258 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3259
3260 dev_err(&ioa_cfg->pdev->dev,
3261 "Adapter being reset as a result of error recovery.\n");
3262
3263 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3264 ioa_cfg->sdt_state = GET_DUMP;
3265
3266 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3267
3268 LEAVE;
3269 return rc;
3270 }
3271
3272 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3273 {
3274 int rc;
3275
3276 spin_lock_irq(cmd->device->host->host_lock);
3277 rc = __ipr_eh_host_reset(cmd);
3278 spin_unlock_irq(cmd->device->host->host_lock);
3279
3280 return rc;
3281 }
3282
3283 /**
3284 * ipr_eh_dev_reset - Reset the device
3285 * @scsi_cmd: scsi command struct
3286 *
3287 * This function issues a device reset to the affected device.
3288 * A LUN reset will be sent to the device first. If that does
3289 * not work, a target reset will be sent.
3290 *
3291 * Return value:
3292 * SUCCESS / FAILED
3293 **/
3294 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3295 {
3296 struct ipr_cmnd *ipr_cmd;
3297 struct ipr_ioa_cfg *ioa_cfg;
3298 struct ipr_resource_entry *res;
3299 struct ipr_cmd_pkt *cmd_pkt;
3300 u32 ioasc;
3301
3302 ENTER;
3303 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3304 res = scsi_cmd->device->hostdata;
3305
3306 if (!res)
3307 return FAILED;
3308
3309 /*
3310 * If we are currently going through reset/reload, return failed. This will force the
3311 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3312 * reset to complete
3313 */
3314 if (ioa_cfg->in_reset_reload)
3315 return FAILED;
3316 if (ioa_cfg->ioa_is_dead)
3317 return FAILED;
3318
3319 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3320 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3321 if (ipr_cmd->scsi_cmd)
3322 ipr_cmd->done = ipr_scsi_eh_done;
3323 }
3324 }
3325
3326 res->resetting_device = 1;
3327
3328 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3329
3330 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3331 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3332 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3333 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3334
3335 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3336 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3337
3338 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3339
3340 res->resetting_device = 0;
3341
3342 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3343
3344 LEAVE;
3345 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3346 }
3347
3348 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3349 {
3350 int rc;
3351
3352 spin_lock_irq(cmd->device->host->host_lock);
3353 rc = __ipr_eh_dev_reset(cmd);
3354 spin_unlock_irq(cmd->device->host->host_lock);
3355
3356 return rc;
3357 }
3358
3359 /**
3360 * ipr_bus_reset_done - Op done function for bus reset.
3361 * @ipr_cmd: ipr command struct
3362 *
3363 * This function is the op done function for a bus reset
3364 *
3365 * Return value:
3366 * none
3367 **/
3368 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3369 {
3370 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3371 struct ipr_resource_entry *res;
3372
3373 ENTER;
3374 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3375 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3376 sizeof(res->cfgte.res_handle))) {
3377 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3378 break;
3379 }
3380 }
3381
3382 /*
3383 * If abort has not completed, indicate the reset has, else call the
3384 * abort's done function to wake the sleeping eh thread
3385 */
3386 if (ipr_cmd->sibling->sibling)
3387 ipr_cmd->sibling->sibling = NULL;
3388 else
3389 ipr_cmd->sibling->done(ipr_cmd->sibling);
3390
3391 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3392 LEAVE;
3393 }
3394
3395 /**
3396 * ipr_abort_timeout - An abort task has timed out
3397 * @ipr_cmd: ipr command struct
3398 *
3399  * This function handles the case where an abort task times out. If this
3400 * happens we issue a bus reset since we have resources tied
3401 * up that must be freed before returning to the midlayer.
3402 *
3403 * Return value:
3404 * none
3405 **/
3406 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3407 {
3408 struct ipr_cmnd *reset_cmd;
3409 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3410 struct ipr_cmd_pkt *cmd_pkt;
3411 unsigned long lock_flags = 0;
3412
3413 ENTER;
3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3415 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3417 return;
3418 }
3419
3420 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3421 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3422 ipr_cmd->sibling = reset_cmd;
3423 reset_cmd->sibling = ipr_cmd;
3424 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3425 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3426 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3427 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3428 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3429
3430 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3432 LEAVE;
3433 }
3434
3435 /**
3436 * ipr_cancel_op - Cancel specified op
3437 * @scsi_cmd: scsi command struct
3438 *
3439 * This function cancels specified op.
3440 *
3441 * Return value:
3442 * SUCCESS / FAILED
3443 **/
3444 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3445 {
3446 struct ipr_cmnd *ipr_cmd;
3447 struct ipr_ioa_cfg *ioa_cfg;
3448 struct ipr_resource_entry *res;
3449 struct ipr_cmd_pkt *cmd_pkt;
3450 u32 ioasc;
3451 int op_found = 0;
3452
3453 ENTER;
3454 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3455 res = scsi_cmd->device->hostdata;
3456
3457 /* If we are currently going through reset/reload, return failed.
3458 * This will force the mid-layer to call ipr_eh_host_reset,
3459 * which will then go to sleep and wait for the reset to complete
3460 */
3461 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3462 return FAILED;
3463 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3464 return FAILED;
3465
3466 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3467 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3468 ipr_cmd->done = ipr_scsi_eh_done;
3469 op_found = 1;
3470 break;
3471 }
3472 }
3473
3474 if (!op_found)
3475 return SUCCESS;
3476
3477 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3478 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3479 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3480 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3481 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3482 ipr_cmd->u.sdev = scsi_cmd->device;
3483
3484 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3485 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3486 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3487
3488 /*
3489 * If the abort task timed out and we sent a bus reset, we will get
3490  * one of the following responses to the abort
3491 */
3492 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3493 ioasc = 0;
3494 ipr_trace;
3495 }
3496
3497 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3498 if (!ipr_is_naca_model(res))
3499 res->needs_sync_complete = 1;
3500
3501 LEAVE;
3502 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3503 }
3504
3505 /**
3506 * ipr_eh_abort - Abort a single op
3507 * @scsi_cmd: scsi command struct
3508 *
3509 * Return value:
3510 * SUCCESS / FAILED
3511 **/
3512 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3513 {
3514 unsigned long flags;
3515 int rc;
3516
3517 ENTER;
3518
3519 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3520 rc = ipr_cancel_op(scsi_cmd);
3521 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3522
3523 LEAVE;
3524 return rc;
3525 }
3526
3527 /**
3528 * ipr_handle_other_interrupt - Handle "other" interrupts
3529 * @ioa_cfg: ioa config struct
3530 * @int_reg: interrupt register
3531 *
3532 * Return value:
3533 * IRQ_NONE / IRQ_HANDLED
3534 **/
3535 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3536 volatile u32 int_reg)
3537 {
3538 irqreturn_t rc = IRQ_HANDLED;
3539
3540 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3541 /* Mask the interrupt */
3542 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3543
3544 /* Clear the interrupt */
3545 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3546 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3547
3548 list_del(&ioa_cfg->reset_cmd->queue);
3549 del_timer(&ioa_cfg->reset_cmd->timer);
3550 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3551 } else {
3552 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3553 ioa_cfg->ioa_unit_checked = 1;
3554 else
3555 dev_err(&ioa_cfg->pdev->dev,
3556 "Permanent IOA failure. 0x%08X\n", int_reg);
3557
3558 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3559 ioa_cfg->sdt_state = GET_DUMP;
3560
3561 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3562 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3563 }
3564
3565 return rc;
3566 }
3567
3568 /**
3569 * ipr_isr - Interrupt service routine
3570 * @irq: irq number
3571 * @devp: pointer to ioa config struct
3572 * @regs: pt_regs struct
3573 *
3574 * Return value:
3575 * IRQ_NONE / IRQ_HANDLED
3576 **/
3577 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3578 {
3579 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3580 unsigned long lock_flags = 0;
3581 volatile u32 int_reg, int_mask_reg;
3582 u32 ioasc;
3583 u16 cmd_index;
3584 struct ipr_cmnd *ipr_cmd;
3585 irqreturn_t rc = IRQ_NONE;
3586
3587 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3588
3589 /* If interrupts are disabled, ignore the interrupt */
3590 if (!ioa_cfg->allow_interrupts) {
3591 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3592 return IRQ_NONE;
3593 }
3594
3595 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3596 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3597
3598 /* If an interrupt on the adapter did not occur, ignore it */
3599 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3600 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3601 return IRQ_NONE;
3602 }
3603
3604 while (1) {
3605 ipr_cmd = NULL;
3606
3607 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3608 ioa_cfg->toggle_bit) {
3609
3610 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3611 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3612
3613 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3614 ioa_cfg->errors_logged++;
3615 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3616
3617 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3618 ioa_cfg->sdt_state = GET_DUMP;
3619
3620 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3622 return IRQ_HANDLED;
3623 }
3624
3625 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3626
3627 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3628
3629 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3630
3631 list_del(&ipr_cmd->queue);
3632 del_timer(&ipr_cmd->timer);
3633 ipr_cmd->done(ipr_cmd);
3634
3635 rc = IRQ_HANDLED;
3636
3637 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3638 ioa_cfg->hrrq_curr++;
3639 } else {
3640 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3641 ioa_cfg->toggle_bit ^= 1u;
3642 }
3643 }
3644
3645 if (ipr_cmd != NULL) {
3646 /* Clear the PCI interrupt */
3647 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3648 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3649 } else
3650 break;
3651 }
3652
3653 if (unlikely(rc == IRQ_NONE))
3654 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3655
3656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3657 return rc;
3658 }
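/*
 * Illustrative sketch (not part of the original source): the host response
 * queue walked above is a ring in which the adapter flips a toggle bit each
 * time it wraps, letting the host distinguish new entries from stale ones
 * without a shared index.  All names below are hypothetical.
 */
#if 0	/* example only -- never built with the driver */
#define EX_RING_SIZE	64
#define EX_TOGGLE_BIT	0x00000001u

static unsigned int ex_ring[EX_RING_SIZE];	/* filled in by the "adapter" */
static unsigned int ex_head;			/* consumer index */
static unsigned int ex_toggle = EX_TOGGLE_BIT;	/* expected toggle value */

static int ex_consume_ring(void)
{
	int handled = 0;

	while ((ex_ring[ex_head] & EX_TOGGLE_BIT) == ex_toggle) {
		/* process ex_ring[ex_head] here ... */
		handled++;

		if (++ex_head == EX_RING_SIZE) {
			ex_head = 0;
			ex_toggle ^= EX_TOGGLE_BIT;	/* adapter flips on wrap */
		}
	}
	return handled;
}
#endif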
3659
3660 /**
3661 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3662 * @ioa_cfg: ioa config struct
3663 * @ipr_cmd: ipr command struct
3664 *
3665 * Return value:
3666 * 0 on success / -1 on failure
3667 **/
3668 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3669 struct ipr_cmnd *ipr_cmd)
3670 {
3671 int i;
3672 struct scatterlist *sglist;
3673 u32 length;
3674 u32 ioadl_flags = 0;
3675 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3676 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3677 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3678
3679 length = scsi_cmd->request_bufflen;
3680
3681 if (length == 0)
3682 return 0;
3683
3684 if (scsi_cmd->use_sg) {
3685 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3686 scsi_cmd->request_buffer,
3687 scsi_cmd->use_sg,
3688 scsi_cmd->sc_data_direction);
3689
3690 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3691 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3692 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3693 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3694 ioarcb->write_ioadl_len =
3695 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3696 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3697 ioadl_flags = IPR_IOADL_FLAGS_READ;
3698 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3699 ioarcb->read_ioadl_len =
3700 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3701 }
3702
3703 sglist = scsi_cmd->request_buffer;
3704
3705 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3706 ioadl[i].flags_and_data_len =
3707 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3708 ioadl[i].address =
3709 cpu_to_be32(sg_dma_address(&sglist[i]));
3710 }
3711
3712 if (likely(ipr_cmd->dma_use_sg)) {
3713 ioadl[i-1].flags_and_data_len |=
3714 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3715 return 0;
3716 } else
3717 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3718 } else {
3719 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3720 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3721 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3722 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3723 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3724 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3725 ioadl_flags = IPR_IOADL_FLAGS_READ;
3726 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3727 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3728 }
3729
3730 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3731 scsi_cmd->request_buffer, length,
3732 scsi_cmd->sc_data_direction);
3733
3734 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3735 ipr_cmd->dma_use_sg = 1;
3736 ioadl[0].flags_and_data_len =
3737 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3738 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3739 return 0;
3740 } else
3741 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3742 }
3743
3744 return -1;
3745 }
3746
3747 /**
3748 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3749 * @scsi_cmd: scsi command struct
3750 *
3751 * Return value:
3752 * task attributes
3753 **/
3754 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3755 {
3756 u8 tag[2];
3757 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3758
3759 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3760 switch (tag[0]) {
3761 case MSG_SIMPLE_TAG:
3762 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3763 break;
3764 case MSG_HEAD_TAG:
3765 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3766 break;
3767 case MSG_ORDERED_TAG:
3768 rc = IPR_FLAGS_LO_ORDERED_TASK;
3769 break;
3770 		}
3771 }
3772
3773 return rc;
3774 }
3775
3776 /**
3777 * ipr_erp_done - Process completion of ERP for a device
3778 * @ipr_cmd: ipr command struct
3779 *
3780 * This function copies the sense buffer into the scsi_cmd
3781 * struct and calls the scsi_done function.
3782 *
3783 * Return value:
3784 * nothing
3785 **/
3786 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3787 {
3788 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3789 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3790 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3791 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3792
3793 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3794 scsi_cmd->result |= (DID_ERROR << 16);
3795 ipr_sdev_err(scsi_cmd->device,
3796 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3797 } else {
3798 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3799 SCSI_SENSE_BUFFERSIZE);
3800 }
3801
3802 if (res) {
3803 if (!ipr_is_naca_model(res))
3804 res->needs_sync_complete = 1;
3805 res->in_erp = 0;
3806 }
3807 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3808 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3809 scsi_cmd->scsi_done(scsi_cmd);
3810 }
3811
3812 /**
3813 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3814 * @ipr_cmd: ipr command struct
3815 *
3816 * Return value:
3817 * none
3818 **/
3819 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3820 {
3821 struct ipr_ioarcb *ioarcb;
3822 struct ipr_ioasa *ioasa;
3823
3824 ioarcb = &ipr_cmd->ioarcb;
3825 ioasa = &ipr_cmd->ioasa;
3826
3827 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3828 ioarcb->write_data_transfer_length = 0;
3829 ioarcb->read_data_transfer_length = 0;
3830 ioarcb->write_ioadl_len = 0;
3831 ioarcb->read_ioadl_len = 0;
3832 ioasa->ioasc = 0;
3833 ioasa->residual_data_len = 0;
3834 }
3835
3836 /**
3837 * ipr_erp_request_sense - Send request sense to a device
3838 * @ipr_cmd: ipr command struct
3839 *
3840 * This function sends a request sense to a device as a result
3841 * of a check condition.
3842 *
3843 * Return value:
3844 * nothing
3845 **/
3846 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3847 {
3848 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3849 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3850
3851 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3852 ipr_erp_done(ipr_cmd);
3853 return;
3854 }
3855
3856 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3857
3858 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3859 cmd_pkt->cdb[0] = REQUEST_SENSE;
3860 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3861 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3862 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3863 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3864
3865 ipr_cmd->ioadl[0].flags_and_data_len =
3866 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3867 ipr_cmd->ioadl[0].address =
3868 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3869
3870 ipr_cmd->ioarcb.read_ioadl_len =
3871 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3872 ipr_cmd->ioarcb.read_data_transfer_length =
3873 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3874
3875 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3876 IPR_REQUEST_SENSE_TIMEOUT * 2);
3877 }
3878
3879 /**
3880 * ipr_erp_cancel_all - Send cancel all to a device
3881 * @ipr_cmd: ipr command struct
3882 *
3883 * This function sends a cancel all to a device to clear the
3884 * queue. If we are running TCQ on the device, QERR is set to 1,
3885 * which means all outstanding ops have been dropped on the floor.
3886 * Cancel all will return them to us.
3887 *
3888 * Return value:
3889 * nothing
3890 **/
3891 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3892 {
3893 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3894 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3895 struct ipr_cmd_pkt *cmd_pkt;
3896
3897 res->in_erp = 1;
3898
3899 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3900
3901 if (!scsi_get_tag_type(scsi_cmd->device)) {
3902 ipr_erp_request_sense(ipr_cmd);
3903 return;
3904 }
3905
3906 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3907 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3908 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3909
3910 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3911 IPR_CANCEL_ALL_TIMEOUT);
3912 }
3913
3914 /**
3915 * ipr_dump_ioasa - Dump contents of IOASA
3916 * @ioa_cfg: ioa config struct
3917 * @ipr_cmd: ipr command struct
3918 * @res: resource entry struct
3919 *
3920 * This function is invoked by the interrupt handler when ops
3921 * fail. It will log the IOASA if appropriate. Only called
3922 * for GPDD ops.
3923 *
3924 * Return value:
3925 * none
3926 **/
3927 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3928 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
3929 {
3930 int i;
3931 u16 data_len;
3932 u32 ioasc;
3933 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3934 __be32 *ioasa_data = (__be32 *)ioasa;
3935 int error_index;
3936
3937 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3938
3939 if (0 == ioasc)
3940 return;
3941
3942 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3943 return;
3944
3945 error_index = ipr_get_error(ioasc);
3946
3947 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3948 /* Don't log an error if the IOA already logged one */
3949 if (ioasa->ilid != 0)
3950 return;
3951
3952 if (ipr_error_table[error_index].log_ioasa == 0)
3953 return;
3954 }
3955
3956 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
3957
3958 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3959 data_len = sizeof(struct ipr_ioasa);
3960 else
3961 data_len = be16_to_cpu(ioasa->ret_stat_len);
3962
3963 ipr_err("IOASA Dump:\n");
3964
3965 for (i = 0; i < data_len / 4; i += 4) {
3966 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3967 be32_to_cpu(ioasa_data[i]),
3968 be32_to_cpu(ioasa_data[i+1]),
3969 be32_to_cpu(ioasa_data[i+2]),
3970 be32_to_cpu(ioasa_data[i+3]));
3971 }
3972 }
3973
3974 /**
3975 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3976 * @ipr_cmd:	ipr command struct
3978 *
3979 * Return value:
3980 * none
3981 **/
3982 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3983 {
3984 u32 failing_lba;
3985 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3986 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3987 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3988 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3989
3990 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3991
3992 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3993 return;
3994
3995 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3996
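	/*
	 * Volume set media errors whose failing LBA does not fit in 32 bits
	 * get descriptor format sense data (response code 0x72) with an
	 * information descriptor carrying the 64-bit LBA; everything else
	 * gets fixed format sense data (response code 0x70).
	 */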
3997 if (ipr_is_vset_device(res) &&
3998 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3999 ioasa->u.vset.failing_lba_hi != 0) {
4000 sense_buf[0] = 0x72;
4001 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4002 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4003 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4004
4005 sense_buf[7] = 12;
4006 sense_buf[8] = 0;
4007 sense_buf[9] = 0x0A;
4008 sense_buf[10] = 0x80;
4009
4010 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4011
4012 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4013 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4014 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4015 sense_buf[15] = failing_lba & 0x000000ff;
4016
4017 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4018
4019 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4020 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4021 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4022 sense_buf[19] = failing_lba & 0x000000ff;
4023 } else {
4024 sense_buf[0] = 0x70;
4025 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4026 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4027 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4028
4029 /* Illegal request */
4030 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4031 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4032 sense_buf[7] = 10; /* additional length */
4033
4034 /* IOARCB was in error */
4035 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4036 sense_buf[15] = 0xC0;
4037 else /* Parameter data was invalid */
4038 sense_buf[15] = 0x80;
4039
4040 sense_buf[16] =
4041 ((IPR_FIELD_POINTER_MASK &
4042 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4043 sense_buf[17] =
4044 (IPR_FIELD_POINTER_MASK &
4045 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4046 } else {
4047 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4048 if (ipr_is_vset_device(res))
4049 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4050 else
4051 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4052
4053 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4054 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4055 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4056 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4057 sense_buf[6] = failing_lba & 0x000000ff;
4058 }
4059
4060 sense_buf[7] = 6; /* additional length */
4061 }
4062 }
4063 }
4064
4065 /**
4066 * ipr_get_autosense - Copy autosense data to sense buffer
4067 * @ipr_cmd: ipr command struct
4068 *
4069 * This function copies the autosense buffer to the buffer
4070 * in the scsi_cmd, if there is autosense available.
4071 *
4072 * Return value:
4073 * 1 if autosense was available / 0 if not
4074 **/
4075 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4076 {
4077 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4078
4079 if ((be32_to_cpu(ioasa->ioasc_specific) &
4080 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4081 return 0;
4082
4083 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4084 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4085 SCSI_SENSE_BUFFERSIZE));
4086 return 1;
4087 }
4088
4089 /**
4090 * ipr_erp_start - Process an error response for a SCSI op
4091 * @ioa_cfg: ioa config struct
4092 * @ipr_cmd: ipr command struct
4093 *
4094 * This function determines whether or not to initiate ERP
4095 * on the affected device.
4096 *
4097 * Return value:
4098 * nothing
4099 **/
4100 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4101 struct ipr_cmnd *ipr_cmd)
4102 {
4103 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4104 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4105 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4106
4107 if (!res) {
4108 ipr_scsi_eh_done(ipr_cmd);
4109 return;
4110 }
4111
4112 if (ipr_is_gscsi(res))
4113 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4114 else
4115 ipr_gen_sense(ipr_cmd);
4116
4117 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4118 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4119 if (ipr_is_naca_model(res))
4120 scsi_cmd->result |= (DID_ABORT << 16);
4121 else
4122 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4123 break;
4124 case IPR_IOASC_IR_RESOURCE_HANDLE:
4125 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4126 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4127 break;
4128 case IPR_IOASC_HW_SEL_TIMEOUT:
4129 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4130 if (!ipr_is_naca_model(res))
4131 res->needs_sync_complete = 1;
4132 break;
4133 case IPR_IOASC_SYNC_REQUIRED:
4134 if (!res->in_erp)
4135 res->needs_sync_complete = 1;
4136 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4137 break;
4138 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4139 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4140 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4141 break;
4142 case IPR_IOASC_BUS_WAS_RESET:
4143 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4144 /*
4145 * Report the bus reset and ask for a retry. The device
4146 * will return a check condition/unit attention on the next command.
4147 */
4148 if (!res->resetting_device)
4149 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4150 scsi_cmd->result |= (DID_ERROR << 16);
4151 if (!ipr_is_naca_model(res))
4152 res->needs_sync_complete = 1;
4153 break;
4154 case IPR_IOASC_HW_DEV_BUS_STATUS:
4155 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4156 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4157 if (!ipr_get_autosense(ipr_cmd)) {
4158 if (!ipr_is_naca_model(res)) {
4159 ipr_erp_cancel_all(ipr_cmd);
4160 return;
4161 }
4162 }
4163 }
4164 if (!ipr_is_naca_model(res))
4165 res->needs_sync_complete = 1;
4166 break;
4167 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4168 break;
4169 default:
4170 scsi_cmd->result |= (DID_ERROR << 16);
4171 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4172 res->needs_sync_complete = 1;
4173 break;
4174 }
4175
4176 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4177 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4178 scsi_cmd->scsi_done(scsi_cmd);
4179 }
4180
4181 /**
4182 * ipr_scsi_done - mid-layer done function
4183 * @ipr_cmd: ipr command struct
4184 *
4185 * This function is invoked by the interrupt handler for
4186 * ops generated by the SCSI mid-layer
4187 *
4188 * Return value:
4189 * none
4190 **/
4191 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4192 {
4193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4194 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4195 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4196
4197 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4198
4199 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4200 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4201 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4202 scsi_cmd->scsi_done(scsi_cmd);
4203 } else
4204 ipr_erp_start(ioa_cfg, ipr_cmd);
4205 }
4206
4207 /**
4208 * ipr_queuecommand - Queue a mid-layer request
4209 * @scsi_cmd: scsi command struct
4210 * @done: done function
4211 *
4212 * This function queues a request generated by the mid-layer.
4213 *
4214 * Return value:
4215 * 0 on success
4216 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4217 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4218 **/
4219 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4220 void (*done) (struct scsi_cmnd *))
4221 {
4222 struct ipr_ioa_cfg *ioa_cfg;
4223 struct ipr_resource_entry *res;
4224 struct ipr_ioarcb *ioarcb;
4225 struct ipr_cmnd *ipr_cmd;
4226 int rc = 0;
4227
4228 scsi_cmd->scsi_done = done;
4229 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4230 res = scsi_cmd->device->hostdata;
4231 scsi_cmd->result = (DID_OK << 16);
4232
4233 /*
4234 * We are currently blocking all devices due to a host reset.
4235 * We have told the host to stop giving us new requests, but
4236 * ERP ops don't count. FIXME
4237 */
4238 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4239 return SCSI_MLQUEUE_HOST_BUSY;
4240
4241 /*
4242 * FIXME - Create scsi_set_host_offline interface
4243 * and the ioa_is_dead check can be removed
4244 */
4245 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4246 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4247 scsi_cmd->result = (DID_NO_CONNECT << 16);
4248 scsi_cmd->scsi_done(scsi_cmd);
4249 return 0;
4250 }
4251
4252 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4253 ioarcb = &ipr_cmd->ioarcb;
4254 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4255
4256 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4257 ipr_cmd->scsi_cmd = scsi_cmd;
4258 ioarcb->res_handle = res->cfgte.res_handle;
4259 ipr_cmd->done = ipr_scsi_done;
4260 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4261
4262 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4263 if (scsi_cmd->underflow == 0)
4264 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4265
4266 if (res->needs_sync_complete) {
4267 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4268 res->needs_sync_complete = 0;
4269 }
4270
4271 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4272 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4273 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4274 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4275 }
4276
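	/*
	 * Vendor-specific opcodes (0xC0 and above) are marked as IOA
	 * commands when the target is not a generic SCSI device or when
	 * the opcode is IPR_QUERY_RSRC_STATE.
	 */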
4277 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4278 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4279 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4280
4281 if (likely(rc == 0))
4282 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4283
4284 if (likely(rc == 0)) {
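		/*
		 * The memory barrier orders the IOARCB and IOADL setup above
		 * ahead of the MMIO write to the IOARRIN register, which
		 * hands the command to the adapter.
		 */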
4285 mb();
4286 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4287 ioa_cfg->regs.ioarrin_reg);
4288 } else {
4289 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4290 return SCSI_MLQUEUE_HOST_BUSY;
4291 }
4292
4293 return 0;
4294 }
4295
4296 /**
4297 * ipr_ioa_info - Get information about the card/driver
4298 * @host:		scsi host struct
4299 *
4300 * Return value:
4301 * pointer to buffer with description string
4302 **/
4303 static const char * ipr_ioa_info(struct Scsi_Host *host)
4304 {
4305 static char buffer[512];
4306 struct ipr_ioa_cfg *ioa_cfg;
4307 unsigned long lock_flags = 0;
4308
4309 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4310
4311 spin_lock_irqsave(host->host_lock, lock_flags);
4312 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4313 spin_unlock_irqrestore(host->host_lock, lock_flags);
4314
4315 return buffer;
4316 }
4317
4318 static struct scsi_host_template driver_template = {
4319 .module = THIS_MODULE,
4320 .name = "IPR",
4321 .info = ipr_ioa_info,
4322 .queuecommand = ipr_queuecommand,
4323 .eh_abort_handler = ipr_eh_abort,
4324 .eh_device_reset_handler = ipr_eh_dev_reset,
4325 .eh_host_reset_handler = ipr_eh_host_reset,
4326 .slave_alloc = ipr_slave_alloc,
4327 .slave_configure = ipr_slave_configure,
4328 .slave_destroy = ipr_slave_destroy,
4329 .change_queue_depth = ipr_change_queue_depth,
4330 .change_queue_type = ipr_change_queue_type,
4331 .bios_param = ipr_biosparam,
4332 .can_queue = IPR_MAX_COMMANDS,
4333 .this_id = -1,
4334 .sg_tablesize = IPR_MAX_SGLIST,
4335 .max_sectors = IPR_IOA_MAX_SECTORS,
4336 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4337 .use_clustering = ENABLE_CLUSTERING,
4338 .shost_attrs = ipr_ioa_attrs,
4339 .sdev_attrs = ipr_dev_attrs,
4340 .proc_name = IPR_NAME
4341 };
4342
4343 #ifdef CONFIG_PPC_PSERIES
4344 static const u16 ipr_blocked_processors[] = {
4345 PV_NORTHSTAR,
4346 PV_PULSAR,
4347 PV_POWER4,
4348 PV_ICESTAR,
4349 PV_SSTAR,
4350 PV_POWER4p,
4351 PV_630,
4352 PV_630p
4353 };
4354
4355 /**
4356 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4357 * @ioa_cfg: ioa cfg struct
4358 *
4359 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4360 * certain pSeries hardware. This function determines if the given
4361 * adapter is in one of these configurations or not.
4362 *
4363 * Return value:
4364 * 1 if adapter is not supported / 0 if adapter is supported
4365 **/
4366 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4367 {
4368 u8 rev_id;
4369 int i;
4370
4371 if (ioa_cfg->type == 0x5702) {
4372 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4373 &rev_id) == PCIBIOS_SUCCESSFUL) {
4374 if (rev_id < 4) {
4375 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4376 if (__is_processor(ipr_blocked_processors[i]))
4377 return 1;
4378 }
4379 }
4380 }
4381 }
4382 return 0;
4383 }
4384 #else
4385 #define ipr_invalid_adapter(ioa_cfg) 0
4386 #endif
4387
4388 /**
4389 * ipr_ioa_bringdown_done - IOA bring down completion.
4390 * @ipr_cmd: ipr command struct
4391 *
4392 * This function processes the completion of an adapter bring down.
4393 * It wakes any reset sleepers.
4394 *
4395 * Return value:
4396 * IPR_RC_JOB_RETURN
4397 **/
4398 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4399 {
4400 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4401
4402 ENTER;
4403 ioa_cfg->in_reset_reload = 0;
4404 ioa_cfg->reset_retries = 0;
4405 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4406 wake_up_all(&ioa_cfg->reset_wait_q);
4407
4408 spin_unlock_irq(ioa_cfg->host->host_lock);
4409 scsi_unblock_requests(ioa_cfg->host);
4410 spin_lock_irq(ioa_cfg->host->host_lock);
4411 LEAVE;
4412
4413 return IPR_RC_JOB_RETURN;
4414 }
4415
4416 /**
4417 * ipr_ioa_reset_done - IOA reset completion.
4418 * @ipr_cmd: ipr command struct
4419 *
4420 * This function processes the completion of an adapter reset.
4421 * It schedules any necessary mid-layer add/removes and
4422 * wakes any reset sleepers.
4423 *
4424 * Return value:
4425 * IPR_RC_JOB_RETURN
4426 **/
4427 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4428 {
4429 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4430 struct ipr_resource_entry *res;
4431 struct ipr_hostrcb *hostrcb, *temp;
4432 int i = 0;
4433
4434 ENTER;
4435 ioa_cfg->in_reset_reload = 0;
4436 ioa_cfg->allow_cmds = 1;
4437 ioa_cfg->reset_cmd = NULL;
4438 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4439
4440 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4441 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4442 ipr_trace;
4443 break;
4444 }
4445 }
4446 schedule_work(&ioa_cfg->work_q);
4447
4448 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4449 list_del(&hostrcb->queue);
4450 if (i++ < IPR_NUM_LOG_HCAMS)
4451 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4452 else
4453 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4454 }
4455
4456 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4457
4458 ioa_cfg->reset_retries = 0;
4459 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4460 wake_up_all(&ioa_cfg->reset_wait_q);
4461
4462 spin_unlock_irq(ioa_cfg->host->host_lock);
4463 scsi_unblock_requests(ioa_cfg->host);
4464 spin_lock_irq(ioa_cfg->host->host_lock);
4465
4466 if (!ioa_cfg->allow_cmds)
4467 scsi_block_requests(ioa_cfg->host);
4468
4469 LEAVE;
4470 return IPR_RC_JOB_RETURN;
4471 }
4472
4473 /**
4474 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4475 * @supported_dev: supported device struct
4476 * @vpids: vendor product id struct
4477 *
4478 * Return value:
4479 * none
4480 **/
4481 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4482 struct ipr_std_inq_vpids *vpids)
4483 {
4484 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4485 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4486 supported_dev->num_records = 1;
4487 supported_dev->data_length =
4488 cpu_to_be16(sizeof(struct ipr_supported_device));
4489 supported_dev->reserved = 0;
4490 }
4491
4492 /**
4493 * ipr_set_supported_devs - Send Set Supported Devices for a device
4494 * @ipr_cmd: ipr command struct
4495 *
4496 * This function sends a Set Supported Devices command to the adapter
4497 *
4498 * Return value:
4499 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4500 **/
4501 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4502 {
4503 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4504 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4505 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4506 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4507 struct ipr_resource_entry *res = ipr_cmd->u.res;
4508
4509 ipr_cmd->job_step = ipr_ioa_reset_done;
4510
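	/*
	 * This job step re-enters itself once per SCSI disk resource:
	 * iteration continues from the resource saved in ipr_cmd->u.res,
	 * and each pass sends one Set Supported Devices command. Once the
	 * list is exhausted the job falls through to ipr_ioa_reset_done.
	 */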
4511 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4512 if (!ipr_is_scsi_disk(res))
4513 continue;
4514
4515 ipr_cmd->u.res = res;
4516 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4517
4518 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4519 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4520 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4521
4522 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4523 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4524 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4525
4526 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4527 sizeof(struct ipr_supported_device));
4528 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4529 offsetof(struct ipr_misc_cbs, supp_dev));
4530 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4531 ioarcb->write_data_transfer_length =
4532 cpu_to_be32(sizeof(struct ipr_supported_device));
4533
4534 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4535 IPR_SET_SUP_DEVICE_TIMEOUT);
4536
4537 ipr_cmd->job_step = ipr_set_supported_devs;
4538 return IPR_RC_JOB_RETURN;
4539 }
4540
4541 return IPR_RC_JOB_CONTINUE;
4542 }
4543
4544 /**
4545 * ipr_setup_write_cache - Disable write cache if needed
4546 * @ipr_cmd: ipr command struct
4547 *
4548 * This function sets up the adapter's write cache to the desired setting
4549 *
4550 * Return value:
4551 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4552 **/
4553 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4554 {
4555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4556
4557 ipr_cmd->job_step = ipr_set_supported_devs;
4558 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4559 struct ipr_resource_entry, queue);
4560
4561 if (ioa_cfg->cache_state != CACHE_DISABLED)
4562 return IPR_RC_JOB_CONTINUE;
4563
4564 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4565 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4566 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4567 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4568
4569 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4570
4571 return IPR_RC_JOB_RETURN;
4572 }
4573
4574 /**
4575 * ipr_get_mode_page - Locate specified mode page
4576 * @mode_pages: mode page buffer
4577 * @page_code: page code to find
4578 * @len: minimum required length for mode page
4579 *
4580 * Return value:
4581 * pointer to mode page / NULL on failure
4582 **/
4583 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4584 u32 page_code, u32 len)
4585 {
4586 struct ipr_mode_page_hdr *mode_hdr;
4587 u32 page_length;
4588 u32 length;
4589
4590 if (!mode_pages || (mode_pages->hdr.length == 0))
4591 return NULL;
4592
4593 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4594 mode_hdr = (struct ipr_mode_page_hdr *)
4595 (mode_pages->data + mode_pages->hdr.block_desc_len);
4596
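	/*
	 * Walk the mode page headers that follow the block descriptors,
	 * stopping at the first header whose page code matches. The page
	 * is only returned if it is long enough to hold the structure the
	 * caller asked for.
	 */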
4597 while (length) {
4598 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4599 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4600 return mode_hdr;
4601 break;
4602 } else {
4603 page_length = (sizeof(struct ipr_mode_page_hdr) +
4604 mode_hdr->page_length);
4605 length -= page_length;
4606 mode_hdr = (struct ipr_mode_page_hdr *)
4607 ((unsigned long)mode_hdr + page_length);
4608 }
4609 }
4610 return NULL;
4611 }
4612
4613 /**
4614 * ipr_check_term_power - Check for term power errors
4615 * @ioa_cfg: ioa config struct
4616 * @mode_pages: IOAFP mode pages buffer
4617 *
4618 * Check the IOAFP's mode page 28 for term power errors
4619 *
4620 * Return value:
4621 * nothing
4622 **/
4623 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4624 struct ipr_mode_pages *mode_pages)
4625 {
4626 int i;
4627 int entry_length;
4628 struct ipr_dev_bus_entry *bus;
4629 struct ipr_mode_page28 *mode_page;
4630
4631 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4632 sizeof(struct ipr_mode_page28));
4633
4634 entry_length = mode_page->entry_length;
4635
4636 bus = mode_page->bus;
4637
4638 for (i = 0; i < mode_page->num_entries; i++) {
4639 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4640 dev_err(&ioa_cfg->pdev->dev,
4641 "Term power is absent on scsi bus %d\n",
4642 bus->res_addr.bus);
4643 }
4644
4645 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4646 }
4647 }
4648
4649 /**
4650 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4651 * @ioa_cfg: ioa config struct
4652 *
4653 * Looks through the config table checking for SES devices. If
4654 * the SES device is in the SES table indicating a maximum SCSI
4655 * bus speed, the speed is limited for the bus.
4656 *
4657 * Return value:
4658 * none
4659 **/
4660 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4661 {
4662 u32 max_xfer_rate;
4663 int i;
4664
4665 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4666 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4667 ioa_cfg->bus_attr[i].bus_width);
4668
4669 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4670 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4671 }
4672 }
4673
4674 /**
4675 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4676 * @ioa_cfg: ioa config struct
4677 * @mode_pages: mode page 28 buffer
4678 *
4679 * Updates mode page 28 based on driver configuration
4680 *
4681 * Return value:
4682 * none
4683 **/
4684 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4685 struct ipr_mode_pages *mode_pages)
4686 {
4687 int i, entry_length;
4688 struct ipr_dev_bus_entry *bus;
4689 struct ipr_bus_attributes *bus_attr;
4690 struct ipr_mode_page28 *mode_page;
4691
4692 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4693 sizeof(struct ipr_mode_page28));
4694
4695 entry_length = mode_page->entry_length;
4696
4697 /* Loop for each device bus entry */
4698 for (i = 0, bus = mode_page->bus;
4699 i < mode_page->num_entries;
4700 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4701 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4702 dev_err(&ioa_cfg->pdev->dev,
4703 "Invalid resource address reported: 0x%08X\n",
4704 IPR_GET_PHYS_LOC(bus->res_addr));
4705 continue;
4706 }
4707
4708 bus_attr = &ioa_cfg->bus_attr[i];
4709 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4710 bus->bus_width = bus_attr->bus_width;
4711 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4712 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4713 if (bus_attr->qas_enabled)
4714 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4715 else
4716 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4717 }
4718 }
4719
4720 /**
4721 * ipr_build_mode_select - Build a mode select command
4722 * @ipr_cmd: ipr command struct
4723 * @res_handle: resource handle to send command to
4724 * @parm:		Byte 2 of the Mode Select command
4725 * @dma_addr: DMA buffer address
4726 * @xfer_len: data transfer length
4727 *
4728 * Return value:
4729 * none
4730 **/
4731 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4732 __be32 res_handle, u8 parm, u32 dma_addr,
4733 u8 xfer_len)
4734 {
4735 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4736 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4737
4738 ioarcb->res_handle = res_handle;
4739 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4740 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4741 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4742 ioarcb->cmd_pkt.cdb[1] = parm;
4743 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4744
4745 ioadl->flags_and_data_len =
4746 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4747 ioadl->address = cpu_to_be32(dma_addr);
4748 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4749 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4750 }
4751
4752 /**
4753 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4754 * @ipr_cmd: ipr command struct
4755 *
4756 * This function sets up the SCSI bus attributes and sends
4757 * a Mode Select for Page 28 to activate them.
4758 *
4759 * Return value:
4760 * IPR_RC_JOB_RETURN
4761 **/
4762 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4763 {
4764 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4765 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4766 int length;
4767
4768 ENTER;
4769 ipr_scsi_bus_speed_limit(ioa_cfg);
4770 ipr_check_term_power(ioa_cfg, mode_pages);
4771 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4772 length = mode_pages->hdr.length + 1;
4773 mode_pages->hdr.length = 0;
4774
4775 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4776 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4777 length);
4778
4779 ipr_cmd->job_step = ipr_setup_write_cache;
4780 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4781
4782 LEAVE;
4783 return IPR_RC_JOB_RETURN;
4784 }
4785
4786 /**
4787 * ipr_build_mode_sense - Builds a mode sense command
4788 * @ipr_cmd: ipr command struct
4789 * @res_handle:	resource handle to send command to
4790 * @parm: Byte 2 of mode sense command
4791 * @dma_addr: DMA address of mode sense buffer
4792 * @xfer_len: Size of DMA buffer
4793 *
4794 * Return value:
4795 * none
4796 **/
4797 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4798 __be32 res_handle,
4799 u8 parm, u32 dma_addr, u8 xfer_len)
4800 {
4801 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4802 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4803
4804 ioarcb->res_handle = res_handle;
4805 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4806 ioarcb->cmd_pkt.cdb[2] = parm;
4807 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4808 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4809
4810 ioadl->flags_and_data_len =
4811 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4812 ioadl->address = cpu_to_be32(dma_addr);
4813 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4814 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4815 }
4816
4817 /**
4818 * ipr_reset_cmd_failed - Handle failure of IOA reset command
4819 * @ipr_cmd: ipr command struct
4820 *
4821 * This function handles the failure of an IOA bringup command.
4822 *
4823 * Return value:
4824 * IPR_RC_JOB_RETURN
4825 **/
4826 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4827 {
4828 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4829 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4830
4831 dev_err(&ioa_cfg->pdev->dev,
4832 "0x%02X failed with IOASC: 0x%08X\n",
4833 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4834
4835 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4836 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4837 return IPR_RC_JOB_RETURN;
4838 }
4839
4840 /**
4841 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4842 * @ipr_cmd: ipr command struct
4843 *
4844 * This function handles the failure of a Mode Sense to the IOAFP.
4845 * Some adapters do not handle all mode pages.
4846 *
4847 * Return value:
4848 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4849 **/
4850 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4851 {
4852 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4853
4854 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4855 ipr_cmd->job_step = ipr_setup_write_cache;
4856 return IPR_RC_JOB_CONTINUE;
4857 }
4858
4859 return ipr_reset_cmd_failed(ipr_cmd);
4860 }
4861
4862 /**
4863 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4864 * @ipr_cmd: ipr command struct
4865 *
4866 * This function sends a Page 28 mode sense to the IOA to
4867 * retrieve SCSI bus attributes.
4868 *
4869 * Return value:
4870 * IPR_RC_JOB_RETURN
4871 **/
4872 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4873 {
4874 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4875
4876 ENTER;
4877 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4878 0x28, ioa_cfg->vpd_cbs_dma +
4879 offsetof(struct ipr_misc_cbs, mode_pages),
4880 sizeof(struct ipr_mode_pages));
4881
4882 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4883 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4884
4885 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4886
4887 LEAVE;
4888 return IPR_RC_JOB_RETURN;
4889 }
4890
4891 /**
4892 * ipr_init_res_table - Initialize the resource table
4893 * @ipr_cmd: ipr command struct
4894 *
4895 * This function looks through the existing resource table, comparing
4896 * it with the config table. This function will take care of old/new
4897 * devices and schedule adding/removing them from the mid-layer
4898 * as appropriate.
4899 *
4900 * Return value:
4901 * IPR_RC_JOB_CONTINUE
4902 **/
4903 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4904 {
4905 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4906 struct ipr_resource_entry *res, *temp;
4907 struct ipr_config_table_entry *cfgte;
4908 int found, i;
4909 LIST_HEAD(old_res);
4910
4911 ENTER;
4912 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4913 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4914
4915 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4916 list_move_tail(&res->queue, &old_res);
4917
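	/*
	 * Match each config table entry against the old resource list by
	 * resource address. Unmatched entries get a fresh resource from the
	 * free list and are flagged for addition to the mid-layer; leftover
	 * old resources are flagged for removal or returned to the free list.
	 */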
4918 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4919 cfgte = &ioa_cfg->cfg_table->dev[i];
4920 found = 0;
4921
4922 list_for_each_entry_safe(res, temp, &old_res, queue) {
4923 if (!memcmp(&res->cfgte.res_addr,
4924 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4925 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4926 found = 1;
4927 break;
4928 }
4929 }
4930
4931 if (!found) {
4932 if (list_empty(&ioa_cfg->free_res_q)) {
4933 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4934 break;
4935 }
4936
4937 found = 1;
4938 res = list_entry(ioa_cfg->free_res_q.next,
4939 struct ipr_resource_entry, queue);
4940 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4941 ipr_init_res_entry(res);
4942 res->add_to_ml = 1;
4943 }
4944
4945 if (found)
4946 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4947 }
4948
4949 list_for_each_entry_safe(res, temp, &old_res, queue) {
4950 if (res->sdev) {
4951 res->del_from_ml = 1;
4952 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
4953 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4954 } else {
4955 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4956 }
4957 }
4958
4959 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4960
4961 LEAVE;
4962 return IPR_RC_JOB_CONTINUE;
4963 }
4964
4965 /**
4966 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4967 * @ipr_cmd: ipr command struct
4968 *
4969 * This function sends a Query IOA Configuration command
4970 * to the adapter to retrieve the IOA configuration table.
4971 *
4972 * Return value:
4973 * IPR_RC_JOB_RETURN
4974 **/
4975 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4976 {
4977 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4978 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4979 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4980 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4981
4982 ENTER;
4983 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4984 ucode_vpd->major_release, ucode_vpd->card_type,
4985 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4986 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4987 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4988
4989 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4990 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4991 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4992
4993 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4994 ioarcb->read_data_transfer_length =
4995 cpu_to_be32(sizeof(struct ipr_config_table));
4996
4997 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4998 ioadl->flags_and_data_len =
4999 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5000
5001 ipr_cmd->job_step = ipr_init_res_table;
5002
5003 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5004
5005 LEAVE;
5006 return IPR_RC_JOB_RETURN;
5007 }
5008
5009 /**
5010 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5011 * @ipr_cmd: ipr command struct
5012 *
5013 * This utility function sends an inquiry to the adapter.
5014 *
5015 * Return value:
5016 * none
5017 **/
5018 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5019 u32 dma_addr, u8 xfer_len)
5020 {
5021 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5022 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5023
5024 ENTER;
5025 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5026 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5027
5028 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5029 ioarcb->cmd_pkt.cdb[1] = flags;
5030 ioarcb->cmd_pkt.cdb[2] = page;
5031 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5032
5033 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5034 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5035
5036 ioadl->address = cpu_to_be32(dma_addr);
5037 ioadl->flags_and_data_len =
5038 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5039
5040 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5041 LEAVE;
5042 }
5043
5044 /**
5045 * ipr_inquiry_page_supported - Is the given inquiry page supported
5046 * @page0: inquiry page 0 buffer
5047 * @page: page code.
5048 *
5049 * This function determines if the specified inquiry page is supported.
5050 *
5051 * Return value:
5052 * 1 if page is supported / 0 if not
5053 **/
5054 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5055 {
5056 int i;
5057
5058 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5059 if (page0->page[i] == page)
5060 return 1;
5061
5062 return 0;
5063 }
5064
5065 /**
5066 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5067 * @ipr_cmd: ipr command struct
5068 *
5069 * This function sends a Page 3 inquiry to the adapter
5070 * to retrieve software VPD information.
5071 *
5072 * Return value:
5073 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5074 **/
5075 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5076 {
5077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5078 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5079
5080 ENTER;
5081
5082 if (!ipr_inquiry_page_supported(page0, 1))
5083 ioa_cfg->cache_state = CACHE_NONE;
5084
5085 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5086
5087 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5088 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5089 sizeof(struct ipr_inquiry_page3));
5090
5091 LEAVE;
5092 return IPR_RC_JOB_RETURN;
5093 }
5094
5095 /**
5096 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5097 * @ipr_cmd: ipr command struct
5098 *
5099 * This function sends a Page 0 inquiry to the adapter
5100 * to retrieve supported inquiry pages.
5101 *
5102 * Return value:
5103 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5104 **/
5105 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5106 {
5107 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5108 char type[5];
5109
5110 ENTER;
5111
5112 /* Grab the type out of the VPD and store it away */
5113 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5114 type[4] = '\0';
5115 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5116
5117 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5118
5119 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5120 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5121 sizeof(struct ipr_inquiry_page0));
5122
5123 LEAVE;
5124 return IPR_RC_JOB_RETURN;
5125 }
5126
5127 /**
5128 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5129 * @ipr_cmd: ipr command struct
5130 *
5131 * This function sends a standard inquiry to the adapter.
5132 *
5133 * Return value:
5134 * IPR_RC_JOB_RETURN
5135 **/
5136 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5137 {
5138 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5139
5140 ENTER;
5141 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5142
5143 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5144 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5145 sizeof(struct ipr_ioa_vpd));
5146
5147 LEAVE;
5148 return IPR_RC_JOB_RETURN;
5149 }
5150
5151 /**
5152 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5153 * @ipr_cmd: ipr command struct
5154 *
5155 * This function sends an Identify Host Request Response Queue
5156 * command to establish the HRRQ with the adapter.
5157 *
5158 * Return value:
5159 * IPR_RC_JOB_RETURN
5160 **/
5161 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5162 {
5163 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5164 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5165
5166 ENTER;
5167 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5168
5169 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5170 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5171
5172 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
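	/*
	 * CDB bytes 2-5 carry the 32-bit DMA address of the host RRQ,
	 * most significant byte first; bytes 7-8 carry the size of the
	 * queue in bytes (one 32-bit entry per command block).
	 */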
5173 ioarcb->cmd_pkt.cdb[2] =
5174 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5175 ioarcb->cmd_pkt.cdb[3] =
5176 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5177 ioarcb->cmd_pkt.cdb[4] =
5178 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5179 ioarcb->cmd_pkt.cdb[5] =
5180 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5181 ioarcb->cmd_pkt.cdb[7] =
5182 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5183 ioarcb->cmd_pkt.cdb[8] =
5184 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5185
5186 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5187
5188 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5189
5190 LEAVE;
5191 return IPR_RC_JOB_RETURN;
5192 }
5193
5194 /**
5195 * ipr_reset_timer_done - Adapter reset timer function
5196 * @ipr_cmd: ipr command struct
5197 *
5198 * Description: This function is used in adapter reset processing
5199 * for timing events. If the reset_cmd pointer in the IOA
5200 * config struct no longer points to this command, we are in a nested
5201 * reset and fail_all_ops will take care of freeing the
5202 * command block.
5203 *
5204 * Return value:
5205 * none
5206 **/
5207 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5208 {
5209 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5210 unsigned long lock_flags = 0;
5211
5212 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5213
5214 if (ioa_cfg->reset_cmd == ipr_cmd) {
5215 list_del(&ipr_cmd->queue);
5216 ipr_cmd->done(ipr_cmd);
5217 }
5218
5219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5220 }
5221
5222 /**
5223 * ipr_reset_start_timer - Start a timer for adapter reset job
5224 * @ipr_cmd: ipr command struct
5225 * @timeout: timeout value
5226 *
5227 * Description: This function is used in adapter reset processing
5228 * for timing events. If the reset_cmd pointer in the IOA
5229 * config struct no longer points to this command, we are in a nested
5230 * reset and fail_all_ops will take care of freeing the
5231 * command block.
5232 *
5233 * Return value:
5234 * none
5235 **/
5236 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5237 unsigned long timeout)
5238 {
5239 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5240 ipr_cmd->done = ipr_reset_ioa_job;
5241
5242 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5243 ipr_cmd->timer.expires = jiffies + timeout;
5244 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5245 add_timer(&ipr_cmd->timer);
5246 }
5247
5248 /**
5249 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5250 * @ioa_cfg: ioa cfg struct
5251 *
5252 * Return value:
5253 * nothing
5254 **/
5255 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5256 {
5257 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5258
5259 /* Initialize Host RRQ pointers */
5260 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5261 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5262 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5263 ioa_cfg->toggle_bit = 1;
5264
5265 /* Zero out config table */
5266 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5267 }
5268
5269 /**
5270 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5271 * @ipr_cmd: ipr command struct
5272 *
5273 * This function reinitializes some control blocks and
5274 * enables destructive diagnostics on the adapter.
5275 *
5276 * Return value:
5277 * IPR_RC_JOB_RETURN
5278 **/
5279 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5280 {
5281 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5282 volatile u32 int_reg;
5283
5284 ENTER;
5285 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5286 ipr_init_ioa_mem(ioa_cfg);
5287
5288 ioa_cfg->allow_interrupts = 1;
5289 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5290
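	/*
	 * If the adapter has already signalled its transition to the
	 * operational state, just unmask the error and HRRQ interrupts and
	 * let the reset job continue; otherwise ring the doorbell to enable
	 * destructive diagnostics and arm a timer to wait for the adapter
	 * to become operational.
	 */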
5291 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5292 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5293 ioa_cfg->regs.clr_interrupt_mask_reg);
5294 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5295 return IPR_RC_JOB_CONTINUE;
5296 }
5297
5298 /* Enable destructive diagnostics on IOA */
5299 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5300
5301 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5302 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5303
5304 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5305
5306 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5307 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5308 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5309 ipr_cmd->done = ipr_reset_ioa_job;
5310 add_timer(&ipr_cmd->timer);
5311 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5312
5313 LEAVE;
5314 return IPR_RC_JOB_RETURN;
5315 }
5316
5317 /**
5318 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5319 * @ipr_cmd: ipr command struct
5320 *
5321 * This function is invoked when an adapter dump has run out
5322 * of processing time.
5323 *
5324 * Return value:
5325 * IPR_RC_JOB_CONTINUE
5326 **/
5327 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5328 {
5329 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5330
5331 if (ioa_cfg->sdt_state == GET_DUMP)
5332 ioa_cfg->sdt_state = ABORT_DUMP;
5333
5334 ipr_cmd->job_step = ipr_reset_alert;
5335
5336 return IPR_RC_JOB_CONTINUE;
5337 }
5338
5339 /**
5340 * ipr_unit_check_no_data - Log a unit check/no data error log
5341 * @ioa_cfg: ioa config struct
5342 *
5343 * Logs an error indicating the adapter unit checked, but for some
5344 * reason, we were unable to fetch the unit check buffer.
5345 *
5346 * Return value:
5347 * nothing
5348 **/
5349 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5350 {
5351 ioa_cfg->errors_logged++;
5352 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5353 }
5354
5355 /**
5356 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5357 * @ioa_cfg: ioa config struct
5358 *
5359 * Fetches the unit check buffer from the adapter by clocking the data
5360 * through the mailbox register.
5361 *
5362 * Return value:
5363 * nothing
5364 **/
5365 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5366 {
5367 unsigned long mailbox;
5368 struct ipr_hostrcb *hostrcb;
5369 struct ipr_uc_sdt sdt;
5370 int rc, length;
5371
5372 mailbox = readl(ioa_cfg->ioa_mailbox);
5373
5374 if (!ipr_sdt_is_fmt2(mailbox)) {
5375 ipr_unit_check_no_data(ioa_cfg);
5376 return;
5377 }
5378
5379 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5380 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5381 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5382
5383 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5384 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5385 ipr_unit_check_no_data(ioa_cfg);
5386 return;
5387 }
5388
5389 /* Find length of the first sdt entry (UC buffer) */
5390 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5391 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5392
5393 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5394 struct ipr_hostrcb, queue);
5395 list_del(&hostrcb->queue);
5396 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5397
5398 rc = ipr_get_ldump_data_section(ioa_cfg,
5399 be32_to_cpu(sdt.entry[0].bar_str_offset),
5400 (__be32 *)&hostrcb->hcam,
5401 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5402
5403 if (!rc)
5404 ipr_handle_log_data(ioa_cfg, hostrcb);
5405 else
5406 ipr_unit_check_no_data(ioa_cfg);
5407
5408 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5409 }
5410
5411 /**
5412 * ipr_reset_restore_cfg_space - Restore PCI config space.
5413 * @ipr_cmd: ipr command struct
5414 *
5415 * Description: This function restores the saved PCI config space of
5416 * the adapter, fails all outstanding ops back to the callers, and
5417 * fetches the dump/unit check if applicable to this reset.
5418 *
5419 * Return value:
5420 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5421 **/
5422 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5423 {
5424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5425 int rc;
5426
5427 ENTER;
5428 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5429 rc = pci_restore_state(ioa_cfg->pdev);
5430
5431 if (rc != PCIBIOS_SUCCESSFUL) {
5432 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5433 return IPR_RC_JOB_CONTINUE;
5434 }
5435
5436 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5437 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5438 return IPR_RC_JOB_CONTINUE;
5439 }
5440
5441 ipr_fail_all_ops(ioa_cfg);
5442
5443 if (ioa_cfg->ioa_unit_checked) {
5444 ioa_cfg->ioa_unit_checked = 0;
5445 ipr_get_unit_check_buffer(ioa_cfg);
5446 ipr_cmd->job_step = ipr_reset_alert;
5447 ipr_reset_start_timer(ipr_cmd, 0);
5448 return IPR_RC_JOB_RETURN;
5449 }
5450
5451 if (ioa_cfg->in_ioa_bringdown) {
5452 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5453 } else {
5454 ipr_cmd->job_step = ipr_reset_enable_ioa;
5455
5456 if (GET_DUMP == ioa_cfg->sdt_state) {
5457 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5458 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5459 schedule_work(&ioa_cfg->work_q);
5460 return IPR_RC_JOB_RETURN;
5461 }
5462 }
5463
5464 	LEAVE;
5465 return IPR_RC_JOB_CONTINUE;
5466 }
5467
5468 /**
5469 * ipr_reset_start_bist - Run BIST on the adapter.
5470 * @ipr_cmd: ipr command struct
5471 *
5472 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5473 *
5474 * Return value:
5475 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5476 **/
5477 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5478 {
5479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5480 int rc;
5481
5482 ENTER;
5483 pci_block_user_cfg_access(ioa_cfg->pdev);
5484 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5485
5486 if (rc != PCIBIOS_SUCCESSFUL) {
5487 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5488 rc = IPR_RC_JOB_CONTINUE;
5489 } else {
5490 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5491 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5492 rc = IPR_RC_JOB_RETURN;
5493 }
5494
5495 LEAVE;
5496 return rc;
5497 }
5498
5499 /**
5500 * ipr_reset_allowed - Query whether or not IOA can be reset
5501 * @ioa_cfg: ioa config struct
5502 *
5503 * Return value:
5504 * 0 if reset not allowed / non-zero if reset is allowed
5505 **/
5506 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5507 {
5508 volatile u32 temp_reg;
5509
5510 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5511 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5512 }
5513
5514 /**
5515 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5516 * @ipr_cmd: ipr command struct
5517 *
5518 * Description: This function waits for adapter permission to run BIST,
5519 * then runs BIST. If the adapter does not give permission after a
5520 * reasonable time, we will reset the adapter anyway. Resetting the
5521 * adapter without first warning it carries the risk of losing the
5522 * persistent error log on the adapter: if the adapter is reset while
5523 * it is writing to its flash, that flash segment will have bad ECC
5524 * and be zeroed.
5525 *
5526 * Return value:
5527 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5528 **/
5529 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5530 {
5531 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5532 int rc = IPR_RC_JOB_RETURN;
5533
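/*
 * Poll every IPR_CHECK_FOR_RESET_TIMEOUT until the adapter drops
 * IPR_PCII_CRITICAL_OPERATION or the wait budget is exhausted, then
 * start BIST regardless.
 */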
5534 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5535 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5536 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5537 } else {
5538 ipr_cmd->job_step = ipr_reset_start_bist;
5539 rc = IPR_RC_JOB_CONTINUE;
5540 }
5541
5542 return rc;
5543 }
5544
5545 /**
5546 * ipr_reset_alert - Alert the adapter of a pending reset
5547 * @ipr_cmd: ipr command struct
5548 *
5549 * Description: This function alerts the adapter that it will be reset.
5550 * If memory space is not currently enabled, proceed directly
5551 * to running BIST on the adapter. The timer must always be started
5552 * so we guarantee we do not run BIST from ipr_isr.
5553 *
5554 * Return value:
5555 * IPR_RC_JOB_RETURN
5556 **/
5557 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5558 {
5559 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5560 u16 cmd_reg;
5561 int rc;
5562
5563 ENTER;
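/*
 * Only alert the adapter if PCI memory space is enabled; the alert is
 * signaled by setting IPR_UPROCI_RESET_ALERT in the uproc interrupt
 * register. Otherwise go straight to BIST.
 */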
5564 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5565
5566 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5567 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5568 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5569 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5570 } else {
5571 ipr_cmd->job_step = ipr_reset_start_bist;
5572 }
5573
5574 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5575 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5576
5577 LEAVE;
5578 return IPR_RC_JOB_RETURN;
5579 }
5580
5581 /**
5582 * ipr_reset_ucode_download_done - Microcode download completion
5583 * @ipr_cmd: ipr command struct
5584 *
5585 * Description: This function unmaps the microcode download buffer.
5586 *
5587 * Return value:
5588 * IPR_RC_JOB_CONTINUE
5589 **/
5590 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5591 {
5592 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5593 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5594
5595 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5596 sglist->num_sg, DMA_TO_DEVICE);
5597
5598 ipr_cmd->job_step = ipr_reset_alert;
5599 return IPR_RC_JOB_CONTINUE;
5600 }
5601
5602 /**
5603 * ipr_reset_ucode_download - Download microcode to the adapter
5604 * @ipr_cmd: ipr command struct
5605 *
5606 * Description: This function checks to see if there is microcode
5607 * to download to the adapter. If there is, a download is performed.
5608 *
5609 * Return value:
5610 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5611 **/
5612 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5613 {
5614 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5615 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5616
5617 ENTER;
5618 ipr_cmd->job_step = ipr_reset_alert;
5619
5620 if (!sglist)
5621 return IPR_RC_JOB_CONTINUE;
5622
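/*
 * Build a SCSI WRITE BUFFER (download microcode and save) addressed to
 * the IOA itself; CDB bytes 6-8 carry the 24-bit image length.
 */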
5623 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5624 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5625 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5626 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5627 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5628 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5629 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5630
5631 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5632 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5633
5634 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5635 IPR_WRITE_BUFFER_TIMEOUT);
5636
5637 LEAVE;
5638 return IPR_RC_JOB_RETURN;
5639 }
5640
5641 /**
5642 * ipr_reset_shutdown_ioa - Shutdown the adapter
5643 * @ipr_cmd: ipr command struct
5644 *
5645 * Description: This function issues an adapter shutdown of the
5646 * specified type to the specified adapter as part of the
5647 * adapter reset job.
5648 *
5649 * Return value:
5650 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5651 **/
5652 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5653 {
5654 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5655 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5656 unsigned long timeout;
5657 int rc = IPR_RC_JOB_CONTINUE;
5658
5659 ENTER;
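/*
 * Issue the shutdown only when one was requested and the adapter is
 * still alive; the timeout depends on the shutdown flavor. When the
 * shutdown completes, the job moves on to microcode download.
 */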
5660 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5661 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5662 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5663 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5664 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5665
5666 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5667 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5668 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5669 timeout = IPR_INTERNAL_TIMEOUT;
5670 else
5671 timeout = IPR_SHUTDOWN_TIMEOUT;
5672
5673 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5674
5675 rc = IPR_RC_JOB_RETURN;
5676 ipr_cmd->job_step = ipr_reset_ucode_download;
5677 } else
5678 ipr_cmd->job_step = ipr_reset_alert;
5679
5680 LEAVE;
5681 return rc;
5682 }
5683
5684 /**
5685 * ipr_reset_ioa_job - Adapter reset job
5686 * @ipr_cmd: ipr command struct
5687 *
5688 * Description: This function is the job router for the adapter reset job.
5689 *
5690 * Return value:
5691 * none
5692 **/
5693 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5694 {
5695 u32 rc, ioasc;
5696 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5697
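/*
 * Run job steps back to back while they return IPR_RC_JOB_CONTINUE;
 * a step returns IPR_RC_JOB_RETURN when it has queued asynchronous
 * work (a timer or an adapter command) that will re-enter this routine.
 */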
5698 do {
5699 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5700
5701 if (ioa_cfg->reset_cmd != ipr_cmd) {
5702 /*
5703 * We are doing nested adapter resets and this is
5704 * not the current reset job.
5705 */
5706 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5707 return;
5708 }
5709
5710 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5711 rc = ipr_cmd->job_step_failed(ipr_cmd);
5712 if (rc == IPR_RC_JOB_RETURN)
5713 return;
5714 }
5715
5716 ipr_reinit_ipr_cmnd(ipr_cmd);
5717 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5718 rc = ipr_cmd->job_step(ipr_cmd);
5719 	} while (rc == IPR_RC_JOB_CONTINUE);
5720 }
5721
5722 /**
5723 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5724 * @ioa_cfg: ioa config struct
5725 * @job_step: first job step of reset job
5726 * @shutdown_type: shutdown type
5727 *
5728 * Description: This function will initiate the reset of the given adapter
5729 * starting at the selected job step.
5730 * If the caller needs to wait on the completion of the reset,
5731 * the caller must sleep on the reset_wait_q.
5732 *
5733 * Return value:
5734 * none
5735 **/
5736 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5737 int (*job_step) (struct ipr_cmnd *),
5738 enum ipr_shutdown_type shutdown_type)
5739 {
5740 struct ipr_cmnd *ipr_cmd;
5741
5742 ioa_cfg->in_reset_reload = 1;
5743 ioa_cfg->allow_cmds = 0;
5744 scsi_block_requests(ioa_cfg->host);
5745
5746 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5747 ioa_cfg->reset_cmd = ipr_cmd;
5748 ipr_cmd->job_step = job_step;
5749 ipr_cmd->u.shutdown_type = shutdown_type;
5750
5751 ipr_reset_ioa_job(ipr_cmd);
5752 }
5753
5754 /**
5755 * ipr_initiate_ioa_reset - Initiate an adapter reset
5756 * @ioa_cfg: ioa config struct
5757 * @shutdown_type: shutdown type
5758 *
5759 * Description: This function will initiate the reset of the given adapter.
5760 * If the caller needs to wait on the completion of the reset,
5761 * the caller must sleep on the reset_wait_q.
5762 *
5763 * Return value:
5764 * none
5765 **/
5766 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5767 enum ipr_shutdown_type shutdown_type)
5768 {
5769 if (ioa_cfg->ioa_is_dead)
5770 return;
5771
5772 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5773 ioa_cfg->sdt_state = ABORT_DUMP;
5774
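/*
 * If too many reset attempts have failed, take the adapter offline.
 * When a bringdown was already in progress, just fail outstanding ops
 * and wake any waiters; otherwise perform one final bringdown-style
 * reset with no shutdown.
 */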
5775 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5776 dev_err(&ioa_cfg->pdev->dev,
5777 "IOA taken offline - error recovery failed\n");
5778
5779 ioa_cfg->reset_retries = 0;
5780 ioa_cfg->ioa_is_dead = 1;
5781
5782 if (ioa_cfg->in_ioa_bringdown) {
5783 ioa_cfg->reset_cmd = NULL;
5784 ioa_cfg->in_reset_reload = 0;
5785 ipr_fail_all_ops(ioa_cfg);
5786 wake_up_all(&ioa_cfg->reset_wait_q);
5787
5788 spin_unlock_irq(ioa_cfg->host->host_lock);
5789 scsi_unblock_requests(ioa_cfg->host);
5790 spin_lock_irq(ioa_cfg->host->host_lock);
5791 return;
5792 } else {
5793 ioa_cfg->in_ioa_bringdown = 1;
5794 shutdown_type = IPR_SHUTDOWN_NONE;
5795 }
5796 }
5797
5798 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5799 shutdown_type);
5800 }
5801
5802 /**
5803 * ipr_reset_freeze - Hold off all I/O activity
5804 * @ipr_cmd: ipr command struct
5805 *
5806 * Description: If the PCI slot is frozen, hold off all I/O
5807 * activity; then, as soon as the slot is available again,
5808 * initiate an adapter reset.
5809 */
5810 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5811 {
5812 /* Disallow new interrupts, avoid loop */
5813 ipr_cmd->ioa_cfg->allow_interrupts = 0;
5814 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5815 ipr_cmd->done = ipr_reset_ioa_job;
5816 return IPR_RC_JOB_RETURN;
5817 }
5818
5819 /**
5820 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5821 * @pdev: PCI device struct
5822 *
5823 * Description: This routine is called to tell us that the PCI bus
5824 * is down. Can't do anything here, except put the device driver
5825 * into a holding pattern, waiting for the PCI bus to come back.
5826 */
5827 static void ipr_pci_frozen(struct pci_dev *pdev)
5828 {
5829 unsigned long flags = 0;
5830 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5831
5832 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5833 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5834 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5835 }
5836
5837 /**
5838 * ipr_pci_slot_reset - Called when PCI slot has been reset.
5839 * @pdev: PCI device struct
5840 *
5841 * Description: This routine is called by the pci error recovery
5842 * code after the PCI slot has been reset, just before we
5843 * should resume normal operations.
5844 */
5845 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
5846 {
5847 unsigned long flags = 0;
5848 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5849
5850 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5851 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
5852 IPR_SHUTDOWN_NONE);
5853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5854 return PCI_ERS_RESULT_RECOVERED;
5855 }
5856
5857 /**
5858 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
5859 * @pdev: PCI device struct
5860 *
5861 * Description: This routine is called when the PCI bus has
5862 * permanently failed.
5863 */
5864 static void ipr_pci_perm_failure(struct pci_dev *pdev)
5865 {
5866 unsigned long flags = 0;
5867 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5868
5869 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5870 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5871 ioa_cfg->sdt_state = ABORT_DUMP;
5872 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
5873 ioa_cfg->in_ioa_bringdown = 1;
5874 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5875 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5876 }
5877
5878 /**
5879 * ipr_pci_error_detected - Called when a PCI error is detected.
5880 * @pdev: PCI device struct
5881 * @state: PCI channel state
5882 *
5883 * Description: Called when a PCI error is detected.
5884 *
5885 * Return value:
5886 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
5887 */
5888 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
5889 pci_channel_state_t state)
5890 {
5891 switch (state) {
5892 case pci_channel_io_frozen:
5893 ipr_pci_frozen(pdev);
5894 return PCI_ERS_RESULT_NEED_RESET;
5895 case pci_channel_io_perm_failure:
5896 ipr_pci_perm_failure(pdev);
5897 return PCI_ERS_RESULT_DISCONNECT;
5899 default:
5900 break;
5901 }
5902 return PCI_ERS_RESULT_NEED_RESET;
5903 }
5904
5905 /**
5906 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5907 * @ioa_cfg: ioa cfg struct
5908 *
5909 * Description: This is the second phase of adapter initialization.
5910 * This function takes care of initializing the adapter to the point
5911 * where it can accept new commands.
5912 *
5913 * Return value:
5914 * 0 on success / -EIO on failure
5915 **/
5916 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5917 {
5918 int rc = 0;
5919 unsigned long host_lock_flags = 0;
5920
5921 ENTER;
5922 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5923 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
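/*
 * Use a full bringup reset if probe found the card in an unknown state
 * (hard reset needed); otherwise skip the shutdown/BIST steps and just
 * enable the IOA.
 */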
5924 if (ioa_cfg->needs_hard_reset) {
5925 ioa_cfg->needs_hard_reset = 0;
5926 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5927 } else
5928 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
5929 IPR_SHUTDOWN_NONE);
5930
5931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5932 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5933 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5934
5935 if (ioa_cfg->ioa_is_dead) {
5936 rc = -EIO;
5937 } else if (ipr_invalid_adapter(ioa_cfg)) {
5938 if (!ipr_testmode)
5939 rc = -EIO;
5940
5941 dev_err(&ioa_cfg->pdev->dev,
5942 "Adapter not supported in this hardware configuration.\n");
5943 }
5944
5945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5946
5947 LEAVE;
5948 return rc;
5949 }
5950
5951 /**
5952 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5953 * @ioa_cfg: ioa config struct
5954 *
5955 * Return value:
5956 * none
5957 **/
5958 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5959 {
5960 int i;
5961
5962 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5963 if (ioa_cfg->ipr_cmnd_list[i])
5964 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5965 ioa_cfg->ipr_cmnd_list[i],
5966 ioa_cfg->ipr_cmnd_list_dma[i]);
5967
5968 ioa_cfg->ipr_cmnd_list[i] = NULL;
5969 }
5970
5971 if (ioa_cfg->ipr_cmd_pool)
5972 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5973
5974 ioa_cfg->ipr_cmd_pool = NULL;
5975 }
5976
5977 /**
5978 * ipr_free_mem - Frees memory allocated for an adapter
5979 * @ioa_cfg: ioa cfg struct
5980 *
5981 * Return value:
5982 * nothing
5983 **/
5984 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5985 {
5986 int i;
5987
5988 kfree(ioa_cfg->res_entries);
5989 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5990 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5991 ipr_free_cmd_blks(ioa_cfg);
5992 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5993 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5994 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5995 ioa_cfg->cfg_table,
5996 ioa_cfg->cfg_table_dma);
5997
5998 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5999 pci_free_consistent(ioa_cfg->pdev,
6000 sizeof(struct ipr_hostrcb),
6001 ioa_cfg->hostrcb[i],
6002 ioa_cfg->hostrcb_dma[i]);
6003 }
6004
6005 ipr_free_dump(ioa_cfg);
6006 kfree(ioa_cfg->trace);
6007 }
6008
6009 /**
6010 * ipr_free_all_resources - Free all allocated resources for an adapter.
6011 * @ioa_cfg: ioa config struct
6012 *
6013 * This function frees all allocated resources for the
6014 * specified adapter.
6015 *
6016 * Return value:
6017 * none
6018 **/
6019 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6020 {
6021 struct pci_dev *pdev = ioa_cfg->pdev;
6022
6023 ENTER;
6024 free_irq(pdev->irq, ioa_cfg);
6025 iounmap(ioa_cfg->hdw_dma_regs);
6026 pci_release_regions(pdev);
6027 ipr_free_mem(ioa_cfg);
6028 scsi_host_put(ioa_cfg->host);
6029 pci_disable_device(pdev);
6030 LEAVE;
6031 }
6032
6033 /**
6034 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6035 * @ioa_cfg: ioa config struct
6036 *
6037 * Return value:
6038 * 0 on success / -ENOMEM on allocation failure
6039 **/
6040 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6041 {
6042 struct ipr_cmnd *ipr_cmd;
6043 struct ipr_ioarcb *ioarcb;
6044 dma_addr_t dma_addr;
6045 int i;
6046
6047 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6048 sizeof(struct ipr_cmnd), 8, 0);
6049
6050 if (!ioa_cfg->ipr_cmd_pool)
6051 return -ENOMEM;
6052
6053 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6054 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6055
6056 if (!ipr_cmd) {
6057 ipr_free_cmd_blks(ioa_cfg);
6058 return -ENOMEM;
6059 }
6060
6061 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6062 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6063 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6064
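/*
 * Point the IOARCB's IOADL, IOASA and sense buffer addresses back into
 * this command block's own DMA allocation; the response handle encodes
 * the command index shifted left 2 bits.
 */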
6065 ioarcb = &ipr_cmd->ioarcb;
6066 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6067 ioarcb->host_response_handle = cpu_to_be32(i << 2);
6068 ioarcb->write_ioadl_addr =
6069 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6070 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6071 ioarcb->ioasa_host_pci_addr =
6072 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6073 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6074 ipr_cmd->cmd_index = i;
6075 ipr_cmd->ioa_cfg = ioa_cfg;
6076 ipr_cmd->sense_buffer_dma = dma_addr +
6077 offsetof(struct ipr_cmnd, sense_buffer);
6078
6079 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6080 }
6081
6082 return 0;
6083 }
6084
6085 /**
6086 * ipr_alloc_mem - Allocate memory for an adapter
6087 * @ioa_cfg: ioa config struct
6088 *
6089 * Return value:
6090 * 0 on success / non-zero for error
6091 **/
6092 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6093 {
6094 struct pci_dev *pdev = ioa_cfg->pdev;
6095 int i, rc = -ENOMEM;
6096
6097 ENTER;
6098 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6099 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6100
6101 if (!ioa_cfg->res_entries)
6102 goto out;
6103
6104 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6105 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6106
6107 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6108 sizeof(struct ipr_misc_cbs),
6109 &ioa_cfg->vpd_cbs_dma);
6110
6111 if (!ioa_cfg->vpd_cbs)
6112 goto out_free_res_entries;
6113
6114 if (ipr_alloc_cmd_blks(ioa_cfg))
6115 goto out_free_vpd_cbs;
6116
6117 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6118 sizeof(u32) * IPR_NUM_CMD_BLKS,
6119 &ioa_cfg->host_rrq_dma);
6120
6121 if (!ioa_cfg->host_rrq)
6122 goto out_ipr_free_cmd_blocks;
6123
6124 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6125 sizeof(struct ipr_config_table),
6126 &ioa_cfg->cfg_table_dma);
6127
6128 if (!ioa_cfg->cfg_table)
6129 goto out_free_host_rrq;
6130
6131 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6132 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6133 sizeof(struct ipr_hostrcb),
6134 &ioa_cfg->hostrcb_dma[i]);
6135
6136 if (!ioa_cfg->hostrcb[i])
6137 goto out_free_hostrcb_dma;
6138
6139 ioa_cfg->hostrcb[i]->hostrcb_dma =
6140 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6141 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6142 }
6143
6144 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6145 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6146
6147 if (!ioa_cfg->trace)
6148 goto out_free_hostrcb_dma;
6149
6150 rc = 0;
6151 out:
6152 LEAVE;
6153 return rc;
6154
6155 out_free_hostrcb_dma:
6156 while (i-- > 0) {
6157 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6158 ioa_cfg->hostrcb[i],
6159 ioa_cfg->hostrcb_dma[i]);
6160 }
6161 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6162 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6163 out_free_host_rrq:
6164 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6165 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6166 out_ipr_free_cmd_blocks:
6167 ipr_free_cmd_blks(ioa_cfg);
6168 out_free_vpd_cbs:
6169 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6170 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6171 out_free_res_entries:
6172 kfree(ioa_cfg->res_entries);
6173 goto out;
6174 }
6175
6176 /**
6177 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6178 * @ioa_cfg: ioa config struct
6179 *
6180 * Return value:
6181 * none
6182 **/
6183 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6184 {
6185 int i;
6186
6187 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6188 ioa_cfg->bus_attr[i].bus = i;
6189 ioa_cfg->bus_attr[i].qas_enabled = 0;
6190 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6191 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6192 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6193 else
6194 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6195 }
6196 }
6197
6198 /**
6199 * ipr_init_ioa_cfg - Initialize IOA config struct
6200 * @ioa_cfg: ioa config struct
6201 * @host: scsi host struct
6202 * @pdev: PCI dev struct
6203 *
6204 * Return value:
6205 * none
6206 **/
6207 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6208 struct Scsi_Host *host, struct pci_dev *pdev)
6209 {
6210 const struct ipr_interrupt_offsets *p;
6211 struct ipr_interrupts *t;
6212 void __iomem *base;
6213
6214 ioa_cfg->host = host;
6215 ioa_cfg->pdev = pdev;
6216 ioa_cfg->log_level = ipr_log_level;
6217 ioa_cfg->doorbell = IPR_DOORBELL;
6218 if (!ipr_auto_create)
6219 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6220 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6221 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6222 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6223 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6224 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6225 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6226 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6227 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6228
6229 INIT_LIST_HEAD(&ioa_cfg->free_q);
6230 INIT_LIST_HEAD(&ioa_cfg->pending_q);
6231 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6232 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6233 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6234 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6235 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6236 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6237 ioa_cfg->sdt_state = INACTIVE;
6238 if (ipr_enable_cache)
6239 ioa_cfg->cache_state = CACHE_ENABLED;
6240 else
6241 ioa_cfg->cache_state = CACHE_DISABLED;
6242
6243 ipr_initialize_bus_attr(ioa_cfg);
6244
6245 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6246 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6247 host->max_channel = IPR_MAX_BUS_TO_SCAN;
6248 host->unique_id = host->host_no;
6249 host->max_cmd_len = IPR_MAX_CDB_LEN;
6250 pci_set_drvdata(pdev, ioa_cfg);
6251
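/*
 * Translate the chip's register offsets into mapped addresses by adding
 * them to the ioremapped base of the adapter's register space.
 */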
6252 p = &ioa_cfg->chip_cfg->regs;
6253 t = &ioa_cfg->regs;
6254 base = ioa_cfg->hdw_dma_regs;
6255
6256 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6257 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6258 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6259 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6260 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6261 t->ioarrin_reg = base + p->ioarrin_reg;
6262 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6263 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6264 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6265 }
6266
6267 /**
6268 * ipr_get_chip_cfg - Find adapter chip configuration
6269 * @dev_id: PCI device id struct
6270 *
6271 * Return value:
6272 * ptr to chip config on success / NULL on failure
6273 **/
6274 static const struct ipr_chip_cfg_t * __devinit
6275 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6276 {
6277 int i;
6278
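/*
 * driver_data in the PCI id table points directly at a chip config;
 * otherwise fall back to matching vendor/device against ipr_chip[].
 */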
6279 if (dev_id->driver_data)
6280 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6281
6282 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6283 if (ipr_chip[i].vendor == dev_id->vendor &&
6284 ipr_chip[i].device == dev_id->device)
6285 return ipr_chip[i].cfg;
6286 return NULL;
6287 }
6288
6289 /**
6290 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6291 * @pdev: PCI device struct
6292 * @dev_id: PCI device id struct
6293 *
6294 * Return value:
6295 * 0 on success / non-zero on failure
6296 **/
6297 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6298 const struct pci_device_id *dev_id)
6299 {
6300 struct ipr_ioa_cfg *ioa_cfg;
6301 struct Scsi_Host *host;
6302 unsigned long ipr_regs_pci;
6303 void __iomem *ipr_regs;
6304 	int rc = PCIBIOS_SUCCESSFUL;
6305 volatile u32 mask, uproc;
6306
6307 ENTER;
6308
6309 if ((rc = pci_enable_device(pdev))) {
6310 dev_err(&pdev->dev, "Cannot enable adapter\n");
6311 goto out;
6312 }
6313
6314 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6315
6316 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6317
6318 if (!host) {
6319 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6320 rc = -ENOMEM;
6321 goto out_disable;
6322 }
6323
6324 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6325 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6326
6327 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6328
6329 if (!ioa_cfg->chip_cfg) {
6330 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6331 dev_id->vendor, dev_id->device);
		rc = -ENODEV;
6332 		goto out_scsi_host_put;
6333 }
6334
6335 ipr_regs_pci = pci_resource_start(pdev, 0);
6336
6337 rc = pci_request_regions(pdev, IPR_NAME);
6338 if (rc < 0) {
6339 dev_err(&pdev->dev,
6340 "Couldn't register memory range of registers\n");
6341 goto out_scsi_host_put;
6342 }
6343
6344 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6345
6346 if (!ipr_regs) {
6347 dev_err(&pdev->dev,
6348 "Couldn't map memory range of registers\n");
6349 rc = -ENOMEM;
6350 goto out_release_regions;
6351 }
6352
6353 ioa_cfg->hdw_dma_regs = ipr_regs;
6354 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6355 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6356
6357 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6358
6359 pci_set_master(pdev);
6360
6361 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6362 if (rc < 0) {
6363 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6364 goto cleanup_nomem;
6365 }
6366
6367 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6368 ioa_cfg->chip_cfg->cache_line_size);
6369
6370 if (rc != PCIBIOS_SUCCESSFUL) {
6371 dev_err(&pdev->dev, "Write of cache line size failed\n");
6372 rc = -EIO;
6373 goto cleanup_nomem;
6374 }
6375
6376 /* Save away PCI config space for use following IOA reset */
6377 rc = pci_save_state(pdev);
6378
6379 if (rc != PCIBIOS_SUCCESSFUL) {
6380 dev_err(&pdev->dev, "Failed to save PCI config space\n");
6381 rc = -EIO;
6382 goto cleanup_nomem;
6383 }
6384
6385 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6386 goto cleanup_nomem;
6387
6388 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6389 goto cleanup_nomem;
6390
6391 rc = ipr_alloc_mem(ioa_cfg);
6392 if (rc < 0) {
6393 dev_err(&pdev->dev,
6394 "Couldn't allocate enough memory for device driver!\n");
6395 goto cleanup_nomem;
6396 }
6397
6398 /*
6399 * If HRRQ updated interrupt is not masked, or reset alert is set,
6400 * the card is in an unknown state and needs a hard reset
6401 */
6402 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6403 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
6404 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
6405 ioa_cfg->needs_hard_reset = 1;
6406
6407 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6408 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6409
6410 if (rc) {
6411 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6412 pdev->irq, rc);
6413 goto cleanup_nolog;
6414 }
6415
6416 spin_lock(&ipr_driver_lock);
6417 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6418 spin_unlock(&ipr_driver_lock);
6419
6420 LEAVE;
6421 out:
6422 return rc;
6423
6424 cleanup_nolog:
6425 ipr_free_mem(ioa_cfg);
6426 cleanup_nomem:
6427 iounmap(ipr_regs);
6428 out_release_regions:
6429 pci_release_regions(pdev);
6430 out_scsi_host_put:
6431 scsi_host_put(host);
6432 out_disable:
6433 pci_disable_device(pdev);
6434 goto out;
6435 }
6436
6437 /**
6438 * ipr_scan_vsets - Scans for VSET devices
6439 * @ioa_cfg: ioa config struct
6440 *
6441 * Description: Since the VSET resources do not follow SAM in that we can have
6442 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6443 *
6444 * Return value:
6445 * none
6446 **/
6447 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6448 {
6449 int target, lun;
6450
6451 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6452 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6453 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6454 }
6455
6456 /**
6457 * ipr_initiate_ioa_bringdown - Bring down an adapter
6458 * @ioa_cfg: ioa config struct
6459 * @shutdown_type: shutdown type
6460 *
6461 * Description: This function will initiate bringing down the adapter.
6462 * This consists of issuing an IOA shutdown to the adapter
6463 * to flush the cache, and running BIST.
6464 * If the caller needs to wait on the completion of the reset,
6465 * the caller must sleep on the reset_wait_q.
6466 *
6467 * Return value:
6468 * none
6469 **/
6470 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6471 enum ipr_shutdown_type shutdown_type)
6472 {
6473 ENTER;
6474 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6475 ioa_cfg->sdt_state = ABORT_DUMP;
6476 ioa_cfg->reset_retries = 0;
6477 ioa_cfg->in_ioa_bringdown = 1;
6478 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6479 LEAVE;
6480 }
6481
6482 /**
6483 * __ipr_remove - Remove a single adapter
6484 * @pdev: pci device struct
6485 *
6486 * Adapter hot plug remove entry point.
6487 *
6488 * Return value:
6489 * none
6490 **/
6491 static void __ipr_remove(struct pci_dev *pdev)
6492 {
6493 unsigned long host_lock_flags = 0;
6494 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6495 ENTER;
6496
6497 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6498 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6499
6500 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6501 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6502 flush_scheduled_work();
6503 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6504
6505 spin_lock(&ipr_driver_lock);
6506 list_del(&ioa_cfg->queue);
6507 spin_unlock(&ipr_driver_lock);
6508
6509 if (ioa_cfg->sdt_state == ABORT_DUMP)
6510 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6512
6513 ipr_free_all_resources(ioa_cfg);
6514
6515 LEAVE;
6516 }
6517
6518 /**
6519 * ipr_remove - IOA hot plug remove entry point
6520 * @pdev: pci device struct
6521 *
6522 * Adapter hot plug remove entry point.
6523 *
6524 * Return value:
6525 * none
6526 **/
6527 static void ipr_remove(struct pci_dev *pdev)
6528 {
6529 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6530
6531 ENTER;
6532
6533 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6534 &ipr_trace_attr);
6535 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6536 &ipr_dump_attr);
6537 scsi_remove_host(ioa_cfg->host);
6538
6539 __ipr_remove(pdev);
6540
6541 LEAVE;
6542 }
6543
6544 /**
6545 * ipr_probe - Adapter hot plug add entry point
6546 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
6547 * Return value:
6548 * 0 on success / non-zero on failure
6549 **/
6550 static int __devinit ipr_probe(struct pci_dev *pdev,
6551 const struct pci_device_id *dev_id)
6552 {
6553 struct ipr_ioa_cfg *ioa_cfg;
6554 int rc;
6555
6556 rc = ipr_probe_ioa(pdev, dev_id);
6557
6558 if (rc)
6559 return rc;
6560
6561 ioa_cfg = pci_get_drvdata(pdev);
6562 rc = ipr_probe_ioa_part2(ioa_cfg);
6563
6564 if (rc) {
6565 __ipr_remove(pdev);
6566 return rc;
6567 }
6568
6569 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6570
6571 if (rc) {
6572 __ipr_remove(pdev);
6573 return rc;
6574 }
6575
6576 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6577 &ipr_trace_attr);
6578
6579 if (rc) {
6580 scsi_remove_host(ioa_cfg->host);
6581 __ipr_remove(pdev);
6582 return rc;
6583 }
6584
6585 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6586 &ipr_dump_attr);
6587
6588 if (rc) {
6589 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6590 &ipr_trace_attr);
6591 scsi_remove_host(ioa_cfg->host);
6592 __ipr_remove(pdev);
6593 return rc;
6594 }
6595
6596 scsi_scan_host(ioa_cfg->host);
6597 ipr_scan_vsets(ioa_cfg);
6598 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6599 ioa_cfg->allow_ml_add_del = 1;
6600 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6601 schedule_work(&ioa_cfg->work_q);
6602 return 0;
6603 }
6604
6605 /**
6606 * ipr_shutdown - Shutdown handler.
6607 * @pdev: pci device struct
6608 *
6609 * This function is invoked upon system shutdown/reboot. It will issue
6610 * an adapter shutdown to the adapter to flush the write cache.
6611 *
6612 * Return value:
6613 * none
6614 **/
6615 static void ipr_shutdown(struct pci_dev *pdev)
6616 {
6617 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6618 unsigned long lock_flags = 0;
6619
6620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6621 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6622 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6623 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6624 }
6625
6626 static struct pci_device_id ipr_pci_table[] __devinitdata = {
6627 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6628 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6629 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6630 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6631 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6632 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6633 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6634 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6635 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6636 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6637 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6638 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6639 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6640 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6641 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6642 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6643 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6644 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6645 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6646 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6647 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6648 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6649 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
6650 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6651 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6652 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6653 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6654 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6655 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6656 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6657 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6658 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6659 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6660 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6661 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6662 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6663 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6664 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6665 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6666 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6667 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6668 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6669 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6670 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
6671 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6672 { }
6673 };
6674 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6675
6676 static struct pci_error_handlers ipr_err_handler = {
6677 .error_detected = ipr_pci_error_detected,
6678 .slot_reset = ipr_pci_slot_reset,
6679 };
6680
6681 static struct pci_driver ipr_driver = {
6682 .name = IPR_NAME,
6683 .id_table = ipr_pci_table,
6684 .probe = ipr_probe,
6685 .remove = ipr_remove,
6686 .shutdown = ipr_shutdown,
6687 .err_handler = &ipr_err_handler,
6688 };
6689
6690 /**
6691 * ipr_init - Module entry point
6692 *
6693 * Return value:
6694 * 0 on success / negative value on failure
6695 **/
6696 static int __init ipr_init(void)
6697 {
6698 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6699 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6700
6701 return pci_module_init(&ipr_driver);
6702 }
6703
6704 /**
6705 * ipr_exit - Module unload
6706 *
6707 * Module unload entry point.
6708 *
6709 * Return value:
6710 * none
6711 **/
6712 static void __exit ipr_exit(void)
6713 {
6714 pci_unregister_driver(&ipr_driver);
6715 }
6716
6717 module_init(ipr_init);
6718 module_exit(ipr_exit);