1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57 #include <linux/config.h>
58 #include <linux/fs.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
74 #include <asm/io.h>
75 #include <asm/irq.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include "ipr.h"
83
84 /*
85 * Global Data
86 */
87 static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
88 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89 static unsigned int ipr_max_speed = 1;
90 static int ipr_testmode = 0;
91 static unsigned int ipr_fastfail = 0;
92 static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
93 static unsigned int ipr_enable_cache = 1;
94 static unsigned int ipr_debug = 0;
95 static int ipr_auto_create = 1;
96 static DEFINE_SPINLOCK(ipr_driver_lock);
97
98 /* This table describes the differences between DMA controller chips */
99 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
100 { /* Gemstone, Citrine, and Obsidian */
101 .mailbox = 0x0042C,
102 .cache_line_size = 0x20,
103 {
104 .set_interrupt_mask_reg = 0x0022C,
105 .clr_interrupt_mask_reg = 0x00230,
106 .sense_interrupt_mask_reg = 0x0022C,
107 .clr_interrupt_reg = 0x00228,
108 .sense_interrupt_reg = 0x00224,
109 .ioarrin_reg = 0x00404,
110 .sense_uproc_interrupt_reg = 0x00214,
111 .set_uproc_interrupt_reg = 0x00214,
112 .clr_uproc_interrupt_reg = 0x00218
113 }
114 },
115 { /* Snipe and Scamp */
116 .mailbox = 0x0052C,
117 .cache_line_size = 0x20,
118 {
119 .set_interrupt_mask_reg = 0x00288,
120 .clr_interrupt_mask_reg = 0x0028C,
121 .sense_interrupt_mask_reg = 0x00288,
122 .clr_interrupt_reg = 0x00284,
123 .sense_interrupt_reg = 0x00280,
124 .ioarrin_reg = 0x00504,
125 .sense_uproc_interrupt_reg = 0x00290,
126 .set_uproc_interrupt_reg = 0x00290,
127 .clr_uproc_interrupt_reg = 0x00294
128 }
129 },
130 };
131
132 static const struct ipr_chip_t ipr_chip[] = {
133 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
134 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
138 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
139 };
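/*
 * The table above ties each supported PCI vendor/device ID to one of the
 * two register layouts in ipr_chip_cfg[]; the probe code later in this
 * file presumably walks this table to pick the mailbox and interrupt
 * register offsets for the adapter it has found (a reading of the table,
 * not a statement from the original comments).
 */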
140
141 static int ipr_max_bus_speeds [] = {
142 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
143 };
144
145 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
146 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
147 module_param_named(max_speed, ipr_max_speed, uint, 0);
148 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
149 module_param_named(log_level, ipr_log_level, uint, 0);
150 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
151 module_param_named(testmode, ipr_testmode, int, 0);
152 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
153 module_param_named(fastfail, ipr_fastfail, int, 0);
154 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
155 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
156 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
157 module_param_named(enable_cache, ipr_enable_cache, int, 0);
158 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
159 module_param_named(debug, ipr_debug, int, 0);
160 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
161 module_param_named(auto_create, ipr_auto_create, int, 0);
162 MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
163 MODULE_LICENSE("GPL");
164 MODULE_VERSION(IPR_DRIVER_VERSION);
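/*
 * Example (hypothetical invocation, not part of the original source): the
 * parameters above are set at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * which permits U320 negotiation, raises error log verbosity, and shortens
 * device timeouts/retries, per the descriptions above.
 */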
165
166 /* A constant array of IOASCs/URCs/Error Messages */
167 static const
168 struct ipr_error_table_t ipr_error_table[] = {
169 {0x00000000, 1, 1,
170 "8155: An unknown error was received"},
171 {0x00330000, 0, 0,
172 "Soft underlength error"},
173 {0x005A0000, 0, 0,
174 "Command to be cancelled not found"},
175 {0x00808000, 0, 0,
176 "Qualified success"},
177 {0x01080000, 1, 1,
178 "FFFE: Soft device bus error recovered by the IOA"},
179 {0x01170600, 0, 1,
180 "FFF9: Device sector reassign successful"},
181 {0x01170900, 0, 1,
182 "FFF7: Media error recovered by device rewrite procedures"},
183 {0x01180200, 0, 1,
184 "7001: IOA sector reassignment successful"},
185 {0x01180500, 0, 1,
186 "FFF9: Soft media error. Sector reassignment recommended"},
187 {0x01180600, 0, 1,
188 "FFF7: Media error recovered by IOA rewrite procedures"},
189 {0x01418000, 0, 1,
190 "FF3D: Soft PCI bus error recovered by the IOA"},
191 {0x01440000, 1, 1,
192 "FFF6: Device hardware error recovered by the IOA"},
193 {0x01448100, 0, 1,
194 "FFF6: Device hardware error recovered by the device"},
195 {0x01448200, 1, 1,
196 "FF3D: Soft IOA error recovered by the IOA"},
197 {0x01448300, 0, 1,
198 "FFFA: Undefined device response recovered by the IOA"},
199 {0x014A0000, 1, 1,
200 "FFF6: Device bus error, message or command phase"},
201 {0x015D0000, 0, 1,
202 "FFF6: Failure prediction threshold exceeded"},
203 {0x015D9200, 0, 1,
204 "8009: Impending cache battery pack failure"},
205 {0x02040400, 0, 0,
206 "34FF: Disk device format in progress"},
207 {0x023F0000, 0, 0,
208 "Synchronization required"},
209 {0x024E0000, 0, 0,
210 "No ready, IOA shutdown"},
211 {0x025A0000, 0, 0,
212 "Not ready, IOA has been shutdown"},
213 {0x02670100, 0, 1,
214 "3020: Storage subsystem configuration error"},
215 {0x03110B00, 0, 0,
216 "FFF5: Medium error, data unreadable, recommend reassign"},
217 {0x03110C00, 0, 0,
218 "7000: Medium error, data unreadable, do not reassign"},
219 {0x03310000, 0, 1,
220 "FFF3: Disk media format bad"},
221 {0x04050000, 0, 1,
222 "3002: Addressed device failed to respond to selection"},
223 {0x04080000, 1, 1,
224 "3100: Device bus error"},
225 {0x04080100, 0, 1,
226 "3109: IOA timed out a device command"},
227 {0x04088000, 0, 0,
228 "3120: SCSI bus is not operational"},
229 {0x04118000, 0, 1,
230 "9000: IOA reserved area data check"},
231 {0x04118100, 0, 1,
232 "9001: IOA reserved area invalid data pattern"},
233 {0x04118200, 0, 1,
234 "9002: IOA reserved area LRC error"},
235 {0x04320000, 0, 1,
236 "102E: Out of alternate sectors for disk storage"},
237 {0x04330000, 1, 1,
238 "FFF4: Data transfer underlength error"},
239 {0x04338000, 1, 1,
240 "FFF4: Data transfer overlength error"},
241 {0x043E0100, 0, 1,
242 "3400: Logical unit failure"},
243 {0x04408500, 0, 1,
244 "FFF4: Device microcode is corrupt"},
245 {0x04418000, 1, 1,
246 "8150: PCI bus error"},
247 {0x04430000, 1, 0,
248 "Unsupported device bus message received"},
249 {0x04440000, 1, 1,
250 "FFF4: Disk device problem"},
251 {0x04448200, 1, 1,
252 "8150: Permanent IOA failure"},
253 {0x04448300, 0, 1,
254 "3010: Disk device returned wrong response to IOA"},
255 {0x04448400, 0, 1,
256 "8151: IOA microcode error"},
257 {0x04448500, 0, 0,
258 "Device bus status error"},
259 {0x04448600, 0, 1,
260 "8157: IOA error requiring IOA reset to recover"},
261 {0x04490000, 0, 0,
262 "Message reject received from the device"},
263 {0x04449200, 0, 1,
264 "8008: A permanent cache battery pack failure occurred"},
265 {0x0444A000, 0, 1,
266 "9090: Disk unit has been modified after the last known status"},
267 {0x0444A200, 0, 1,
268 "9081: IOA detected device error"},
269 {0x0444A300, 0, 1,
270 "9082: IOA detected device error"},
271 {0x044A0000, 1, 1,
272 "3110: Device bus error, message or command phase"},
273 {0x04670400, 0, 1,
274 "9091: Incorrect hardware configuration change has been detected"},
275 {0x04678000, 0, 1,
276 "9073: Invalid multi-adapter configuration"},
277 {0x046E0000, 0, 1,
278 "FFF4: Command to logical unit failed"},
279 {0x05240000, 1, 0,
280 "Illegal request, invalid request type or request packet"},
281 {0x05250000, 0, 0,
282 "Illegal request, invalid resource handle"},
283 {0x05258000, 0, 0,
284 "Illegal request, commands not allowed to this device"},
285 {0x05258100, 0, 0,
286 "Illegal request, command not allowed to a secondary adapter"},
287 {0x05260000, 0, 0,
288 "Illegal request, invalid field in parameter list"},
289 {0x05260100, 0, 0,
290 "Illegal request, parameter not supported"},
291 {0x05260200, 0, 0,
292 "Illegal request, parameter value invalid"},
293 {0x052C0000, 0, 0,
294 "Illegal request, command sequence error"},
295 {0x052C8000, 1, 0,
296 "Illegal request, dual adapter support not enabled"},
297 {0x06040500, 0, 1,
298 "9031: Array protection temporarily suspended, protection resuming"},
299 {0x06040600, 0, 1,
300 "9040: Array protection temporarily suspended, protection resuming"},
301 {0x06290000, 0, 1,
302 "FFFB: SCSI bus was reset"},
303 {0x06290500, 0, 0,
304 "FFFE: SCSI bus transition to single ended"},
305 {0x06290600, 0, 0,
306 "FFFE: SCSI bus transition to LVD"},
307 {0x06298000, 0, 1,
308 "FFFB: SCSI bus was reset by another initiator"},
309 {0x063F0300, 0, 1,
310 "3029: A device replacement has occurred"},
311 {0x064C8000, 0, 1,
312 "9051: IOA cache data exists for a missing or failed device"},
313 {0x064C8100, 0, 1,
314 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
315 {0x06670100, 0, 1,
316 "9025: Disk unit is not supported at its physical location"},
317 {0x06670600, 0, 1,
318 "3020: IOA detected a SCSI bus configuration error"},
319 {0x06678000, 0, 1,
320 "3150: SCSI bus configuration error"},
321 {0x06678100, 0, 1,
322 "9074: Asymmetric advanced function disk configuration"},
323 {0x06690200, 0, 1,
324 "9041: Array protection temporarily suspended"},
325 {0x06698200, 0, 1,
326 "9042: Corrupt array parity detected on specified device"},
327 {0x066B0200, 0, 1,
328 "9030: Array no longer protected due to missing or failed disk unit"},
329 {0x066B8000, 0, 1,
330 "9071: Link operational transition"},
331 {0x066B8100, 0, 1,
332 "9072: Link not operational transition"},
333 {0x066B8200, 0, 1,
334 "9032: Array exposed but still protected"},
335 {0x07270000, 0, 0,
336 "Failure due to other device"},
337 {0x07278000, 0, 1,
338 "9008: IOA does not support functions expected by devices"},
339 {0x07278100, 0, 1,
340 "9010: Cache data associated with attached devices cannot be found"},
341 {0x07278200, 0, 1,
342 "9011: Cache data belongs to devices other than those attached"},
343 {0x07278400, 0, 1,
344 "9020: Array missing 2 or more devices with only 1 device present"},
345 {0x07278500, 0, 1,
346 "9021: Array missing 2 or more devices with 2 or more devices present"},
347 {0x07278600, 0, 1,
348 "9022: Exposed array is missing a required device"},
349 {0x07278700, 0, 1,
350 "9023: Array member(s) not at required physical locations"},
351 {0x07278800, 0, 1,
352 "9024: Array not functional due to present hardware configuration"},
353 {0x07278900, 0, 1,
354 "9026: Array not functional due to present hardware configuration"},
355 {0x07278A00, 0, 1,
356 "9027: Array is missing a device and parity is out of sync"},
357 {0x07278B00, 0, 1,
358 "9028: Maximum number of arrays already exist"},
359 {0x07278C00, 0, 1,
360 "9050: Required cache data cannot be located for a disk unit"},
361 {0x07278D00, 0, 1,
362 "9052: Cache data exists for a device that has been modified"},
363 {0x07278F00, 0, 1,
364 "9054: IOA resources not available due to previous problems"},
365 {0x07279100, 0, 1,
366 "9092: Disk unit requires initialization before use"},
367 {0x07279200, 0, 1,
368 "9029: Incorrect hardware configuration change has been detected"},
369 {0x07279600, 0, 1,
370 "9060: One or more disk pairs are missing from an array"},
371 {0x07279700, 0, 1,
372 "9061: One or more disks are missing from an array"},
373 {0x07279800, 0, 1,
374 "9062: One or more disks are missing from an array"},
375 {0x07279900, 0, 1,
376 "9063: Maximum number of functional arrays has been exceeded"},
377 {0x0B260000, 0, 0,
378 "Aborted command, invalid descriptor"},
379 {0x0B5A0000, 0, 0,
380 "Command terminated by host"}
381 };
382
383 static const struct ipr_ses_table_entry ipr_ses_table[] = {
384 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
385 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
386 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
387 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
388 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
389 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
390 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
391 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
392 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
393 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
394 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
395 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
396 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
397 };
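/*
 * In the compare_product_id_byte strings above, an 'X' marks a byte of the
 * product ID that must match exactly; any other character (the '*'
 * positions) is effectively a wildcard. See ipr_find_ses_entry() below for
 * the comparison logic.
 */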
398
399 /*
400 * Function Prototypes
401 */
402 static int ipr_reset_alert(struct ipr_cmnd *);
403 static void ipr_process_ccn(struct ipr_cmnd *);
404 static void ipr_process_error(struct ipr_cmnd *);
405 static void ipr_reset_ioa_job(struct ipr_cmnd *);
406 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
407 enum ipr_shutdown_type);
408
409 #ifdef CONFIG_SCSI_IPR_TRACE
410 /**
411 * ipr_trc_hook - Add a trace entry to the driver trace
412 * @ipr_cmd: ipr command struct
413 * @type: trace type
414 * @add_data: additional data
415 *
416 * Return value:
417 * none
418 **/
419 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
420 u8 type, u32 add_data)
421 {
422 struct ipr_trace_entry *trace_entry;
423 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
424
425 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
426 trace_entry->time = jiffies;
427 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
428 trace_entry->type = type;
429 trace_entry->cmd_index = ipr_cmd->cmd_index;
430 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
431 trace_entry->u.add_data = add_data;
432 }
433 #else
434 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
435 #endif
436
437 /**
438 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
439 * @ipr_cmd: ipr command struct
440 *
441 * Return value:
442 * none
443 **/
444 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
445 {
446 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
447 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
448
449 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
450 ioarcb->write_data_transfer_length = 0;
451 ioarcb->read_data_transfer_length = 0;
452 ioarcb->write_ioadl_len = 0;
453 ioarcb->read_ioadl_len = 0;
454 ioasa->ioasc = 0;
455 ioasa->residual_data_len = 0;
456
457 ipr_cmd->scsi_cmd = NULL;
458 ipr_cmd->sense_buffer[0] = 0;
459 ipr_cmd->dma_use_sg = 0;
460 }
461
462 /**
463 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
464 * @ipr_cmd: ipr command struct
465 *
466 * Return value:
467 * none
468 **/
469 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
470 {
471 ipr_reinit_ipr_cmnd(ipr_cmd);
472 ipr_cmd->u.scratch = 0;
473 ipr_cmd->sibling = NULL;
474 init_timer(&ipr_cmd->timer);
475 }
476
477 /**
478 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
479 * @ioa_cfg: ioa config struct
480 *
481 * Return value:
482 * pointer to ipr command struct
483 **/
484 static
485 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
486 {
487 struct ipr_cmnd *ipr_cmd;
488
489 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
490 list_del(&ipr_cmd->queue);
491 ipr_init_ipr_cmnd(ipr_cmd);
492
493 return ipr_cmd;
494 }
495
496 /**
497 * ipr_unmap_sglist - Unmap scatterlist if mapped
498 * @ioa_cfg: ioa config struct
499 * @ipr_cmd: ipr command struct
500 *
501 * Return value:
502 * nothing
503 **/
504 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
505 struct ipr_cmnd *ipr_cmd)
506 {
507 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
508
509 if (ipr_cmd->dma_use_sg) {
510 if (scsi_cmd->use_sg > 0) {
511 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
512 scsi_cmd->use_sg,
513 scsi_cmd->sc_data_direction);
514 } else {
515 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
516 scsi_cmd->request_bufflen,
517 scsi_cmd->sc_data_direction);
518 }
519 }
520 }
521
522 /**
523 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
524 * @ioa_cfg: ioa config struct
525 * @clr_ints: interrupts to clear
526 *
527 * This function masks all interrupts on the adapter, then clears the
528 * interrupts specified in the mask
529 *
530 * Return value:
531 * none
532 **/
533 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
534 u32 clr_ints)
535 {
536 volatile u32 int_reg;
537
538 /* Stop new interrupts */
539 ioa_cfg->allow_interrupts = 0;
540
541 /* Set interrupt mask to stop all new interrupts */
542 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
543
544 /* Clear any pending interrupts */
545 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
546 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
547 }
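/*
 * Note: the readl() of sense_interrupt_reg above appears to serve only to
 * flush the preceding MMIO writes to the adapter before returning; the
 * value read back is not otherwise used.
 */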
548
549 /**
550 * ipr_save_pcix_cmd_reg - Save PCI-X command register
551 * @ioa_cfg: ioa config struct
552 *
553 * Return value:
554 * 0 on success / -EIO on failure
555 **/
556 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
557 {
558 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
559
560 if (pcix_cmd_reg == 0) {
561 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
562 return -EIO;
563 }
564
565 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
566 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
567 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
568 return -EIO;
569 }
570
571 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
572 return 0;
573 }
574
575 /**
576 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
577 * @ioa_cfg: ioa config struct
578 *
579 * Return value:
580 * 0 on success / -EIO on failure
581 **/
582 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
583 {
584 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
585
586 if (pcix_cmd_reg) {
587 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
588 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
589 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
590 return -EIO;
591 }
592 } else {
593 dev_err(&ioa_cfg->pdev->dev,
594 "Failed to setup PCI-X command register\n");
595 return -EIO;
596 }
597
598 return 0;
599 }
600
601 /**
602 * ipr_scsi_eh_done - mid-layer done function for aborted ops
603 * @ipr_cmd: ipr command struct
604 *
605 * This function is invoked by the interrupt handler for
606 * ops generated by the SCSI mid-layer which are being aborted.
607 *
608 * Return value:
609 * none
610 **/
611 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
612 {
613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
614 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
615
616 scsi_cmd->result |= (DID_ERROR << 16);
617
618 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
619 scsi_cmd->scsi_done(scsi_cmd);
620 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
621 }
622
623 /**
624 * ipr_fail_all_ops - Fails all outstanding ops.
625 * @ioa_cfg: ioa config struct
626 *
627 * This function fails all outstanding ops.
628 *
629 * Return value:
630 * none
631 **/
632 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
633 {
634 struct ipr_cmnd *ipr_cmd, *temp;
635
636 ENTER;
637 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
638 list_del(&ipr_cmd->queue);
639
640 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
641 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
642
643 if (ipr_cmd->scsi_cmd)
644 ipr_cmd->done = ipr_scsi_eh_done;
645
646 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
647 del_timer(&ipr_cmd->timer);
648 ipr_cmd->done(ipr_cmd);
649 }
650
651 LEAVE;
652 }
653
654 /**
655 * ipr_do_req - Send driver initiated requests.
656 * @ipr_cmd: ipr command struct
657 * @done: done function
658 * @timeout_func: timeout function
659 * @timeout: timeout value
660 *
661 * This function sends the specified command to the adapter with the
662 * timeout given. The done function is invoked on command completion.
663 *
664 * Return value:
665 * none
666 **/
667 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
668 void (*done) (struct ipr_cmnd *),
669 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
670 {
671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
672
673 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
674
675 ipr_cmd->done = done;
676
677 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
678 ipr_cmd->timer.expires = jiffies + timeout;
679 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
680
681 add_timer(&ipr_cmd->timer);
682
683 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
684
685 mb();
686 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
687 ioa_cfg->regs.ioarrin_reg);
688 }
689
690 /**
691 * ipr_internal_cmd_done - Op done function for an internally generated op.
692 * @ipr_cmd: ipr command struct
693 *
694 * This function is the op done function for an internally generated,
695 * blocking op. It simply wakes the sleeping thread.
696 *
697 * Return value:
698 * none
699 **/
700 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
701 {
702 if (ipr_cmd->sibling)
703 ipr_cmd->sibling = NULL;
704 else
705 complete(&ipr_cmd->completion);
706 }
707
708 /**
709 * ipr_send_blocking_cmd - Send command and sleep on its completion.
710 * @ipr_cmd: ipr command struct
711 * @timeout_func: function to invoke if command times out
712 * @timeout: timeout
713 *
714 * Return value:
715 * none
716 **/
717 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
718 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
719 u32 timeout)
720 {
721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
722
723 init_completion(&ipr_cmd->completion);
724 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
725
726 spin_unlock_irq(ioa_cfg->host->host_lock);
727 wait_for_completion(&ipr_cmd->completion);
728 spin_lock_irq(ioa_cfg->host->host_lock);
729 }
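/*
 * A minimal usage sketch for the two helpers above (hypothetical, not taken
 * from the original source): the caller is assumed to hold host_lock and to
 * have allow_cmds set; the request type, opcode, and timeout constants come
 * from ipr.h, and the CDB shown is illustrative only.
 *
 *	struct ipr_cmnd *ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 *	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
 *
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 *
 * On return the op has either completed or ipr_timeout() has triggered an
 * adapter reset.
 */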
730
731 /**
732 * ipr_send_hcam - Send an HCAM to the adapter.
733 * @ioa_cfg: ioa config struct
734 * @type: HCAM type
735 * @hostrcb: hostrcb struct
736 *
737 * This function will send a Host Controlled Async command to the adapter.
738 * If HCAMs are currently not allowed to be issued to the adapter, it will
739 * place the hostrcb on the free queue.
740 *
741 * Return value:
742 * none
743 **/
744 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
745 struct ipr_hostrcb *hostrcb)
746 {
747 struct ipr_cmnd *ipr_cmd;
748 struct ipr_ioarcb *ioarcb;
749
750 if (ioa_cfg->allow_cmds) {
751 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
752 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
753 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
754
755 ipr_cmd->u.hostrcb = hostrcb;
756 ioarcb = &ipr_cmd->ioarcb;
757
758 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
759 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
760 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
761 ioarcb->cmd_pkt.cdb[1] = type;
762 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
763 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
764
765 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
766 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
767 ipr_cmd->ioadl[0].flags_and_data_len =
768 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
769 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
770
771 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
772 ipr_cmd->done = ipr_process_ccn;
773 else
774 ipr_cmd->done = ipr_process_error;
775
776 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
777
778 mb();
779 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
780 ioa_cfg->regs.ioarrin_reg);
781 } else {
782 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
783 }
784 }
785
786 /**
787 * ipr_init_res_entry - Initialize a resource entry struct.
788 * @res: resource entry struct
789 *
790 * Return value:
791 * none
792 **/
793 static void ipr_init_res_entry(struct ipr_resource_entry *res)
794 {
795 res->needs_sync_complete = 0;
796 res->in_erp = 0;
797 res->add_to_ml = 0;
798 res->del_from_ml = 0;
799 res->resetting_device = 0;
800 res->sdev = NULL;
801 }
802
803 /**
804 * ipr_handle_config_change - Handle a config change from the adapter
805 * @ioa_cfg: ioa config struct
806 * @hostrcb: hostrcb
807 *
808 * Return value:
809 * none
810 **/
811 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
812 struct ipr_hostrcb *hostrcb)
813 {
814 struct ipr_resource_entry *res = NULL;
815 struct ipr_config_table_entry *cfgte;
816 u32 is_ndn = 1;
817
818 cfgte = &hostrcb->hcam.u.ccn.cfgte;
819
820 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
821 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
822 sizeof(cfgte->res_addr))) {
823 is_ndn = 0;
824 break;
825 }
826 }
827
828 if (is_ndn) {
829 if (list_empty(&ioa_cfg->free_res_q)) {
830 ipr_send_hcam(ioa_cfg,
831 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
832 hostrcb);
833 return;
834 }
835
836 res = list_entry(ioa_cfg->free_res_q.next,
837 struct ipr_resource_entry, queue);
838
839 list_del(&res->queue);
840 ipr_init_res_entry(res);
841 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
842 }
843
844 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
845
846 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
847 if (res->sdev) {
848 res->del_from_ml = 1;
849 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
850 if (ioa_cfg->allow_ml_add_del)
851 schedule_work(&ioa_cfg->work_q);
852 } else
853 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
854 } else if (!res->sdev) {
855 res->add_to_ml = 1;
856 if (ioa_cfg->allow_ml_add_del)
857 schedule_work(&ioa_cfg->work_q);
858 }
859
860 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
861 }
862
863 /**
864 * ipr_process_ccn - Op done function for a CCN.
865 * @ipr_cmd: ipr command struct
866 *
867 * This function is the op done function for a configuration
868 * change notification host controlled async from the adapter.
869 *
870 * Return value:
871 * none
872 **/
873 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
874 {
875 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
876 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
877 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
878
879 list_del(&hostrcb->queue);
880 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
881
882 if (ioasc) {
883 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
884 dev_err(&ioa_cfg->pdev->dev,
885 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
886
887 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
888 } else {
889 ipr_handle_config_change(ioa_cfg, hostrcb);
890 }
891 }
892
893 /**
894 * ipr_log_vpd - Log the passed VPD to the error log.
895 * @vpd: vendor/product id/sn struct
896 *
897 * Return value:
898 * none
899 **/
900 static void ipr_log_vpd(struct ipr_vpd *vpd)
901 {
902 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
903 + IPR_SERIAL_NUM_LEN];
904
905 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
906 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
907 IPR_PROD_ID_LEN);
908 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
909 ipr_err("Vendor/Product ID: %s\n", buffer);
910
911 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
912 buffer[IPR_SERIAL_NUM_LEN] = '\0';
913 ipr_err(" Serial Number: %s\n", buffer);
914 }
915
916 /**
917 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
918 * @vpd: vendor/product id/sn/wwn struct
919 *
920 * Return value:
921 * none
922 **/
923 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
924 {
925 ipr_log_vpd(&vpd->vpd);
926 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
927 be32_to_cpu(vpd->wwid[1]));
928 }
929
930 /**
931 * ipr_log_enhanced_cache_error - Log a cache error.
932 * @ioa_cfg: ioa config struct
933 * @hostrcb: hostrcb struct
934 *
935 * Return value:
936 * none
937 **/
938 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
939 struct ipr_hostrcb *hostrcb)
940 {
941 struct ipr_hostrcb_type_12_error *error =
942 &hostrcb->hcam.u.error.u.type_12_error;
943
944 ipr_err("-----Current Configuration-----\n");
945 ipr_err("Cache Directory Card Information:\n");
946 ipr_log_ext_vpd(&error->ioa_vpd);
947 ipr_err("Adapter Card Information:\n");
948 ipr_log_ext_vpd(&error->cfc_vpd);
949
950 ipr_err("-----Expected Configuration-----\n");
951 ipr_err("Cache Directory Card Information:\n");
952 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
953 ipr_err("Adapter Card Information:\n");
954 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
955
956 ipr_err("Additional IOA Data: %08X %08X %08X\n",
957 be32_to_cpu(error->ioa_data[0]),
958 be32_to_cpu(error->ioa_data[1]),
959 be32_to_cpu(error->ioa_data[2]));
960 }
961
962 /**
963 * ipr_log_cache_error - Log a cache error.
964 * @ioa_cfg: ioa config struct
965 * @hostrcb: hostrcb struct
966 *
967 * Return value:
968 * none
969 **/
970 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
971 struct ipr_hostrcb *hostrcb)
972 {
973 struct ipr_hostrcb_type_02_error *error =
974 &hostrcb->hcam.u.error.u.type_02_error;
975
976 ipr_err("-----Current Configuration-----\n");
977 ipr_err("Cache Directory Card Information:\n");
978 ipr_log_vpd(&error->ioa_vpd);
979 ipr_err("Adapter Card Information:\n");
980 ipr_log_vpd(&error->cfc_vpd);
981
982 ipr_err("-----Expected Configuration-----\n");
983 ipr_err("Cache Directory Card Information:\n");
984 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
985 ipr_err("Adapter Card Information:\n");
986 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
987
988 ipr_err("Additional IOA Data: %08X %08X %08X\n",
989 be32_to_cpu(error->ioa_data[0]),
990 be32_to_cpu(error->ioa_data[1]),
991 be32_to_cpu(error->ioa_data[2]));
992 }
993
994 /**
995 * ipr_log_enhanced_config_error - Log a configuration error.
996 * @ioa_cfg: ioa config struct
997 * @hostrcb: hostrcb struct
998 *
999 * Return value:
1000 * none
1001 **/
1002 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1003 struct ipr_hostrcb *hostrcb)
1004 {
1005 int errors_logged, i;
1006 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1007 struct ipr_hostrcb_type_13_error *error;
1008
1009 error = &hostrcb->hcam.u.error.u.type_13_error;
1010 errors_logged = be32_to_cpu(error->errors_logged);
1011
1012 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1013 be32_to_cpu(error->errors_detected), errors_logged);
1014
1015 dev_entry = error->dev;
1016
1017 for (i = 0; i < errors_logged; i++, dev_entry++) {
1018 ipr_err_separator;
1019
1020 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1021 ipr_log_ext_vpd(&dev_entry->vpd);
1022
1023 ipr_err("-----New Device Information-----\n");
1024 ipr_log_ext_vpd(&dev_entry->new_vpd);
1025
1026 ipr_err("Cache Directory Card Information:\n");
1027 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1028
1029 ipr_err("Adapter Card Information:\n");
1030 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1031 }
1032 }
1033
1034 /**
1035 * ipr_log_config_error - Log a configuration error.
1036 * @ioa_cfg: ioa config struct
1037 * @hostrcb: hostrcb struct
1038 *
1039 * Return value:
1040 * none
1041 **/
1042 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1043 struct ipr_hostrcb *hostrcb)
1044 {
1045 int errors_logged, i;
1046 struct ipr_hostrcb_device_data_entry *dev_entry;
1047 struct ipr_hostrcb_type_03_error *error;
1048
1049 error = &hostrcb->hcam.u.error.u.type_03_error;
1050 errors_logged = be32_to_cpu(error->errors_logged);
1051
1052 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1053 be32_to_cpu(error->errors_detected), errors_logged);
1054
1055 dev_entry = error->dev;
1056
1057 for (i = 0; i < errors_logged; i++, dev_entry++) {
1058 ipr_err_separator;
1059
1060 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1061 ipr_log_vpd(&dev_entry->vpd);
1062
1063 ipr_err("-----New Device Information-----\n");
1064 ipr_log_vpd(&dev_entry->new_vpd);
1065
1066 ipr_err("Cache Directory Card Information:\n");
1067 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1068
1069 ipr_err("Adapter Card Information:\n");
1070 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1071
1072 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1073 be32_to_cpu(dev_entry->ioa_data[0]),
1074 be32_to_cpu(dev_entry->ioa_data[1]),
1075 be32_to_cpu(dev_entry->ioa_data[2]),
1076 be32_to_cpu(dev_entry->ioa_data[3]),
1077 be32_to_cpu(dev_entry->ioa_data[4]));
1078 }
1079 }
1080
1081 /**
1082 * ipr_log_enhanced_array_error - Log an array configuration error.
1083 * @ioa_cfg: ioa config struct
1084 * @hostrcb: hostrcb struct
1085 *
1086 * Return value:
1087 * none
1088 **/
1089 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1090 struct ipr_hostrcb *hostrcb)
1091 {
1092 int i, num_entries;
1093 struct ipr_hostrcb_type_14_error *error;
1094 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1095 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1096
1097 error = &hostrcb->hcam.u.error.u.type_14_error;
1098
1099 ipr_err_separator;
1100
1101 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1102 error->protection_level,
1103 ioa_cfg->host->host_no,
1104 error->last_func_vset_res_addr.bus,
1105 error->last_func_vset_res_addr.target,
1106 error->last_func_vset_res_addr.lun);
1107
1108 ipr_err_separator;
1109
1110 array_entry = error->array_member;
1111 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1112 ARRAY_SIZE(error->array_member));
1113
1114 for (i = 0; i < num_entries; i++, array_entry++) {
1115 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1116 continue;
1117
1118 if (be32_to_cpu(error->exposed_mode_adn) == i)
1119 ipr_err("Exposed Array Member %d:\n", i);
1120 else
1121 ipr_err("Array Member %d:\n", i);
1122
1123 ipr_log_ext_vpd(&array_entry->vpd);
1124 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1125 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1126 "Expected Location");
1127
1128 ipr_err_separator;
1129 }
1130 }
1131
1132 /**
1133 * ipr_log_array_error - Log an array configuration error.
1134 * @ioa_cfg: ioa config struct
1135 * @hostrcb: hostrcb struct
1136 *
1137 * Return value:
1138 * none
1139 **/
1140 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1141 struct ipr_hostrcb *hostrcb)
1142 {
1143 int i;
1144 struct ipr_hostrcb_type_04_error *error;
1145 struct ipr_hostrcb_array_data_entry *array_entry;
1146 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1147
1148 error = &hostrcb->hcam.u.error.u.type_04_error;
1149
1150 ipr_err_separator;
1151
1152 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1153 error->protection_level,
1154 ioa_cfg->host->host_no,
1155 error->last_func_vset_res_addr.bus,
1156 error->last_func_vset_res_addr.target,
1157 error->last_func_vset_res_addr.lun);
1158
1159 ipr_err_separator;
1160
1161 array_entry = error->array_member;
1162
1163 for (i = 0; i < 18; i++) {
1164 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1165 continue;
1166
1167 if (be32_to_cpu(error->exposed_mode_adn) == i)
1168 ipr_err("Exposed Array Member %d:\n", i);
1169 else
1170 ipr_err("Array Member %d:\n", i);
1171
1172 ipr_log_vpd(&array_entry->vpd);
1173
1174 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1175 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1176 "Expected Location");
1177
1178 ipr_err_separator;
1179
1180 if (i == 9)
1181 array_entry = error->array_member2;
1182 else
1183 array_entry++;
1184 }
1185 }
1186
1187 /**
1188 * ipr_log_hex_data - Log additional hex IOA error data.
1189 * @data: IOA error data
1190 * @len: data length
1191 *
1192 * Return value:
1193 * none
1194 **/
1195 static void ipr_log_hex_data(u32 *data, int len)
1196 {
1197 int i;
1198
1199 if (len == 0)
1200 return;
1201
1202 for (i = 0; i < len / 4; i += 4) {
1203 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1204 be32_to_cpu(data[i]),
1205 be32_to_cpu(data[i+1]),
1206 be32_to_cpu(data[i+2]),
1207 be32_to_cpu(data[i+3]));
1208 }
1209 }
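/*
 * For example, a call with len == 32 makes two passes through the loop
 * above (i == 0 and i == 4) and prints two lines, at offsets 00000000 and
 * 00000010, each with four big-endian words of IOA data.
 */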
1210
1211 /**
1212 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1213 * @ioa_cfg: ioa config struct
1214 * @hostrcb: hostrcb struct
1215 *
1216 * Return value:
1217 * none
1218 **/
1219 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1220 struct ipr_hostrcb *hostrcb)
1221 {
1222 struct ipr_hostrcb_type_17_error *error;
1223
1224 error = &hostrcb->hcam.u.error.u.type_17_error;
1225 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1226
1227 ipr_err("%s\n", error->failure_reason);
1228 ipr_err("Remote Adapter VPD:\n");
1229 ipr_log_ext_vpd(&error->vpd);
1230 ipr_log_hex_data(error->data,
1231 be32_to_cpu(hostrcb->hcam.length) -
1232 (offsetof(struct ipr_hostrcb_error, u) +
1233 offsetof(struct ipr_hostrcb_type_17_error, data)));
1234 }
1235
1236 /**
1237 * ipr_log_dual_ioa_error - Log a dual adapter error.
1238 * @ioa_cfg: ioa config struct
1239 * @hostrcb: hostrcb struct
1240 *
1241 * Return value:
1242 * none
1243 **/
1244 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1245 struct ipr_hostrcb *hostrcb)
1246 {
1247 struct ipr_hostrcb_type_07_error *error;
1248
1249 error = &hostrcb->hcam.u.error.u.type_07_error;
1250 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1251
1252 ipr_err("%s\n", error->failure_reason);
1253 ipr_err("Remote Adapter VPD:\n");
1254 ipr_log_vpd(&error->vpd);
1255 ipr_log_hex_data(error->data,
1256 be32_to_cpu(hostrcb->hcam.length) -
1257 (offsetof(struct ipr_hostrcb_error, u) +
1258 offsetof(struct ipr_hostrcb_type_07_error, data)));
1259 }
1260
1261 /**
1262 * ipr_log_generic_error - Log an adapter error.
1263 * @ioa_cfg: ioa config struct
1264 * @hostrcb: hostrcb struct
1265 *
1266 * Return value:
1267 * none
1268 **/
1269 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1270 struct ipr_hostrcb *hostrcb)
1271 {
1272 ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1273 be32_to_cpu(hostrcb->hcam.length));
1274 }
1275
1276 /**
1277 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1278 * @ioasc: IOASC
1279 *
1280 * This function will return the index into the ipr_error_table
1281 * for the specified IOASC. If the IOASC is not in the table,
1282 * 0 will be returned, which points to the entry used for unknown errors.
1283 *
1284 * Return value:
1285 * index into the ipr_error_table
1286 **/
1287 static u32 ipr_get_error(u32 ioasc)
1288 {
1289 int i;
1290
1291 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1292 if (ipr_error_table[i].ioasc == ioasc)
1293 return i;
1294
1295 return 0;
1296 }
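/*
 * For example, ipr_get_error(0x01080000) returns the index of the
 * "FFFE: Soft device bus error recovered by the IOA" entry, while an IOASC
 * that is not in the table returns 0, the "8155: An unknown error was
 * received" entry.
 */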
1297
1298 /**
1299 * ipr_handle_log_data - Log an adapter error.
1300 * @ioa_cfg: ioa config struct
1301 * @hostrcb: hostrcb struct
1302 *
1303 * This function logs an adapter error to the system.
1304 *
1305 * Return value:
1306 * none
1307 **/
1308 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1309 struct ipr_hostrcb *hostrcb)
1310 {
1311 u32 ioasc;
1312 int error_index;
1313
1314 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1315 return;
1316
1317 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1318 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1319
1320 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1321
1322 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1323 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1324 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1325 scsi_report_bus_reset(ioa_cfg->host,
1326 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1327 }
1328
1329 error_index = ipr_get_error(ioasc);
1330
1331 if (!ipr_error_table[error_index].log_hcam)
1332 return;
1333
1334 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1335 ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1336 "%s\n", ipr_error_table[error_index].error);
1337 } else {
1338 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1339 ipr_error_table[error_index].error);
1340 }
1341
1342 /* Set indication we have logged an error */
1343 ioa_cfg->errors_logged++;
1344
1345 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1346 return;
1347 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1348 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1349
1350 switch (hostrcb->hcam.overlay_id) {
1351 case IPR_HOST_RCB_OVERLAY_ID_2:
1352 ipr_log_cache_error(ioa_cfg, hostrcb);
1353 break;
1354 case IPR_HOST_RCB_OVERLAY_ID_3:
1355 ipr_log_config_error(ioa_cfg, hostrcb);
1356 break;
1357 case IPR_HOST_RCB_OVERLAY_ID_4:
1358 case IPR_HOST_RCB_OVERLAY_ID_6:
1359 ipr_log_array_error(ioa_cfg, hostrcb);
1360 break;
1361 case IPR_HOST_RCB_OVERLAY_ID_7:
1362 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1363 break;
1364 case IPR_HOST_RCB_OVERLAY_ID_12:
1365 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1366 break;
1367 case IPR_HOST_RCB_OVERLAY_ID_13:
1368 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1369 break;
1370 case IPR_HOST_RCB_OVERLAY_ID_14:
1371 case IPR_HOST_RCB_OVERLAY_ID_16:
1372 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1373 break;
1374 case IPR_HOST_RCB_OVERLAY_ID_17:
1375 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1376 break;
1377 case IPR_HOST_RCB_OVERLAY_ID_1:
1378 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1379 default:
1380 ipr_log_generic_error(ioa_cfg, hostrcb);
1381 break;
1382 }
1383 }
1384
1385 /**
1386 * ipr_process_error - Op done function for an adapter error log.
1387 * @ipr_cmd: ipr command struct
1388 *
1389 * This function is the op done function for an error log host
1390 * controlled async from the adapter. It will log the error and
1391 * send the HCAM back to the adapter.
1392 *
1393 * Return value:
1394 * none
1395 **/
1396 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1397 {
1398 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1399 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1400 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1401
1402 list_del(&hostrcb->queue);
1403 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1404
1405 if (!ioasc) {
1406 ipr_handle_log_data(ioa_cfg, hostrcb);
1407 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1408 dev_err(&ioa_cfg->pdev->dev,
1409 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1410 }
1411
1412 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1413 }
1414
1415 /**
1416 * ipr_timeout - An internally generated op has timed out.
1417 * @ipr_cmd: ipr command struct
1418 *
1419 * This function blocks host requests and initiates an
1420 * adapter reset.
1421 *
1422 * Return value:
1423 * none
1424 **/
1425 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1426 {
1427 unsigned long lock_flags = 0;
1428 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1429
1430 ENTER;
1431 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1432
1433 ioa_cfg->errors_logged++;
1434 dev_err(&ioa_cfg->pdev->dev,
1435 "Adapter being reset due to command timeout.\n");
1436
1437 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1438 ioa_cfg->sdt_state = GET_DUMP;
1439
1440 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1441 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1442
1443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1444 LEAVE;
1445 }
1446
1447 /**
1448 * ipr_oper_timeout - Adapter timed out transitioning to operational
1449 * @ipr_cmd: ipr command struct
1450 *
1451 * This function blocks host requests and initiates an
1452 * adapter reset.
1453 *
1454 * Return value:
1455 * none
1456 **/
1457 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1458 {
1459 unsigned long lock_flags = 0;
1460 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1461
1462 ENTER;
1463 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1464
1465 ioa_cfg->errors_logged++;
1466 dev_err(&ioa_cfg->pdev->dev,
1467 "Adapter timed out transitioning to operational.\n");
1468
1469 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1470 ioa_cfg->sdt_state = GET_DUMP;
1471
1472 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1473 if (ipr_fastfail)
1474 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1475 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1476 }
1477
1478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1479 LEAVE;
1480 }
1481
1482 /**
1483 * ipr_reset_reload - Reset/Reload the IOA
1484 * @ioa_cfg: ioa config struct
1485 * @shutdown_type: shutdown type
1486 *
1487 * This function resets the adapter and re-initializes it.
1488 * This function assumes that all new host commands have been stopped.
1489 * Return value:
1490 * SUCCESS / FAILED
1491 **/
1492 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1493 enum ipr_shutdown_type shutdown_type)
1494 {
1495 if (!ioa_cfg->in_reset_reload)
1496 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1497
1498 spin_unlock_irq(ioa_cfg->host->host_lock);
1499 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1500 spin_lock_irq(ioa_cfg->host->host_lock);
1501
1502 /* If we got hit with a host reset while we were already resetting
1503 the adapter for some reason, and that reset failed, report failure. */
1504 if (ioa_cfg->ioa_is_dead) {
1505 ipr_trace;
1506 return FAILED;
1507 }
1508
1509 return SUCCESS;
1510 }
1511
1512 /**
1513 * ipr_find_ses_entry - Find matching SES in SES table
1514 * @res: resource entry struct of SES
1515 *
1516 * Return value:
1517 * pointer to SES table entry / NULL on failure
1518 **/
1519 static const struct ipr_ses_table_entry *
1520 ipr_find_ses_entry(struct ipr_resource_entry *res)
1521 {
1522 int i, j, matches;
1523 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1524
1525 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1526 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1527 if (ste->compare_product_id_byte[j] == 'X') {
1528 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1529 matches++;
1530 else
1531 break;
1532 } else
1533 matches++;
1534 }
1535
1536 if (matches == IPR_PROD_ID_LEN)
1537 return ste;
1538 }
1539
1540 return NULL;
1541 }
1542
1543 /**
1544 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1545 * @ioa_cfg: ioa config struct
1546 * @bus: SCSI bus
1547 * @bus_width: bus width
1548 *
1549 * Return value:
1550 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1551 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1552 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1553 * max 160MHz = max 320MB/sec).
1554 **/
1555 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1556 {
1557 struct ipr_resource_entry *res;
1558 const struct ipr_ses_table_entry *ste;
1559 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1560
1561 /* Loop through each config table entry in the config table buffer */
1562 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1563 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1564 continue;
1565
1566 if (bus != res->cfgte.res_addr.bus)
1567 continue;
1568
1569 if (!(ste = ipr_find_ses_entry(res)))
1570 continue;
1571
1572 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1573 }
1574
1575 return max_xfer_rate;
1576 }
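/*
 * Worked example (assuming max_bus_speed_limit is in MB/sec, as the 80 and
 * 160 values in ipr_ses_table suggest): for a 16-bit wide bus whose
 * enclosure limits it to 160 MB/sec, (160 * 10) / (16 / 8) == 800, i.e.
 * 80 MHz in the 100KHz units described above.
 */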
1577
1578 /**
1579 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1580 * @ioa_cfg: ioa config struct
1581 * @max_delay: max delay in micro-seconds to wait
1582 *
1583 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1584 *
1585 * Return value:
1586 * 0 on success / other on failure
1587 **/
1588 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1589 {
1590 volatile u32 pcii_reg;
1591 int delay = 1;
1592
1593 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1594 while (delay < max_delay) {
1595 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1596
1597 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1598 return 0;
1599
1600 /* udelay cannot be used if delay is more than a few milliseconds */
1601 if ((delay / 1000) > MAX_UDELAY_MS)
1602 mdelay(delay / 1000);
1603 else
1604 udelay(delay);
1605
1606 delay += delay;
1607 }
1608 return -EIO;
1609 }
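/*
 * The polling delay above doubles on each pass (1, 2, 4, ... microseconds),
 * so the total time spent busy-waiting before returning -EIO is at most
 * about twice max_delay microseconds.
 */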
1610
1611 /**
1612 * ipr_get_ldump_data_section - Dump IOA memory
1613 * @ioa_cfg: ioa config struct
1614 * @start_addr: adapter address to dump
1615 * @dest: destination kernel buffer
1616 * @length_in_words: length to dump in 4 byte words
1617 *
1618 * Return value:
1619 * 0 on success / -EIO on failure
1620 **/
1621 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1622 u32 start_addr,
1623 __be32 *dest, u32 length_in_words)
1624 {
1625 volatile u32 temp_pcii_reg;
1626 int i, delay = 0;
1627
1628 /* Write IOA interrupt reg starting LDUMP state */
1629 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1630 ioa_cfg->regs.set_uproc_interrupt_reg);
1631
1632 /* Wait for IO debug acknowledge */
1633 if (ipr_wait_iodbg_ack(ioa_cfg,
1634 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1635 dev_err(&ioa_cfg->pdev->dev,
1636 "IOA dump long data transfer timeout\n");
1637 return -EIO;
1638 }
1639
1640 /* Signal LDUMP interlocked - clear IO debug ack */
1641 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1642 ioa_cfg->regs.clr_interrupt_reg);
1643
1644 /* Write Mailbox with starting address */
1645 writel(start_addr, ioa_cfg->ioa_mailbox);
1646
1647 /* Signal address valid - clear IOA Reset alert */
1648 writel(IPR_UPROCI_RESET_ALERT,
1649 ioa_cfg->regs.clr_uproc_interrupt_reg);
1650
1651 for (i = 0; i < length_in_words; i++) {
1652 /* Wait for IO debug acknowledge */
1653 if (ipr_wait_iodbg_ack(ioa_cfg,
1654 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1655 dev_err(&ioa_cfg->pdev->dev,
1656 "IOA dump short data transfer timeout\n");
1657 return -EIO;
1658 }
1659
1660 /* Read data from mailbox and increment destination pointer */
1661 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1662 dest++;
1663
1664 /* For all but the last word of data, signal data received */
1665 if (i < (length_in_words - 1)) {
1666 /* Signal dump data received - Clear IO debug Ack */
1667 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1668 ioa_cfg->regs.clr_interrupt_reg);
1669 }
1670 }
1671
1672 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1673 writel(IPR_UPROCI_RESET_ALERT,
1674 ioa_cfg->regs.set_uproc_interrupt_reg);
1675
1676 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1677 ioa_cfg->regs.clr_uproc_interrupt_reg);
1678
1679 /* Signal dump data received - Clear IO debug Ack */
1680 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1681 ioa_cfg->regs.clr_interrupt_reg);
1682
1683 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1684 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1685 temp_pcii_reg =
1686 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1687
1688 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1689 return 0;
1690
1691 udelay(10);
1692 delay += 10;
1693 }
1694
1695 return 0;
1696 }
1697
1698 #ifdef CONFIG_SCSI_IPR_DUMP
1699 /**
1700 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1701 * @ioa_cfg: ioa config struct
1702 * @pci_address: adapter address
1703 * @length: length of data to copy
1704 *
1705 * Copy data from PCI adapter to kernel buffer.
1706 * Note: length MUST be a 4 byte multiple
1707 * Return value:
1708 * 0 on success / other on failure
1709 **/
1710 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1711 unsigned long pci_address, u32 length)
1712 {
1713 int bytes_copied = 0;
1714 int cur_len, rc, rem_len, rem_page_len;
1715 __be32 *page;
1716 unsigned long lock_flags = 0;
1717 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1718
1719 while (bytes_copied < length &&
1720 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1721 if (ioa_dump->page_offset >= PAGE_SIZE ||
1722 ioa_dump->page_offset == 0) {
1723 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1724
1725 if (!page) {
1726 ipr_trace;
1727 return bytes_copied;
1728 }
1729
1730 ioa_dump->page_offset = 0;
1731 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1732 ioa_dump->next_page_index++;
1733 } else
1734 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1735
1736 rem_len = length - bytes_copied;
1737 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1738 cur_len = min(rem_len, rem_page_len);
1739
1740 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1741 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1742 rc = -EIO;
1743 } else {
1744 rc = ipr_get_ldump_data_section(ioa_cfg,
1745 pci_address + bytes_copied,
1746 &page[ioa_dump->page_offset / 4],
1747 (cur_len / sizeof(u32)));
1748 }
1749 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1750
1751 if (!rc) {
1752 ioa_dump->page_offset += cur_len;
1753 bytes_copied += cur_len;
1754 } else {
1755 ipr_trace;
1756 break;
1757 }
1758 schedule();
1759 }
1760
1761 return bytes_copied;
1762 }
1763
1764 /**
1765 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1766 * @hdr: dump entry header struct
1767 *
1768 * Return value:
1769 * nothing
1770 **/
1771 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1772 {
1773 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1774 hdr->num_elems = 1;
1775 hdr->offset = sizeof(*hdr);
1776 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1777 }
1778
1779 /**
1780 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1781 * @ioa_cfg: ioa config struct
1782 * @driver_dump: driver dump struct
1783 *
1784 * Return value:
1785 * nothing
1786 **/
1787 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1788 struct ipr_driver_dump *driver_dump)
1789 {
1790 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1791
1792 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1793 driver_dump->ioa_type_entry.hdr.len =
1794 sizeof(struct ipr_dump_ioa_type_entry) -
1795 sizeof(struct ipr_dump_entry_header);
1796 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1797 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1798 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1799 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1800 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1801 ucode_vpd->minor_release[1];
1802 driver_dump->hdr.num_entries++;
1803 }
1804
1805 /**
1806 * ipr_dump_version_data - Fill in the driver version in the dump.
1807 * @ioa_cfg: ioa config struct
1808 * @driver_dump: driver dump struct
1809 *
1810 * Return value:
1811 * nothing
1812 **/
1813 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1814 struct ipr_driver_dump *driver_dump)
1815 {
1816 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1817 driver_dump->version_entry.hdr.len =
1818 sizeof(struct ipr_dump_version_entry) -
1819 sizeof(struct ipr_dump_entry_header);
1820 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1821 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1822 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1823 driver_dump->hdr.num_entries++;
1824 }
1825
1826 /**
1827 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1828 * @ioa_cfg: ioa config struct
1829 * @driver_dump: driver dump struct
1830 *
1831 * Return value:
1832 * nothing
1833 **/
1834 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1835 struct ipr_driver_dump *driver_dump)
1836 {
1837 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1838 driver_dump->trace_entry.hdr.len =
1839 sizeof(struct ipr_dump_trace_entry) -
1840 sizeof(struct ipr_dump_entry_header);
1841 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1842 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1843 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1844 driver_dump->hdr.num_entries++;
1845 }
1846
1847 /**
1848 * ipr_dump_location_data - Fill in the IOA location in the dump.
1849 * @ioa_cfg: ioa config struct
1850 * @driver_dump: driver dump struct
1851 *
1852 * Return value:
1853 * nothing
1854 **/
1855 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1856 struct ipr_driver_dump *driver_dump)
1857 {
1858 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1859 driver_dump->location_entry.hdr.len =
1860 sizeof(struct ipr_dump_location_entry) -
1861 sizeof(struct ipr_dump_entry_header);
1862 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1863 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1864 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1865 driver_dump->hdr.num_entries++;
1866 }
1867
1868 /**
1869 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1870 * @ioa_cfg: ioa config struct
1871 * @dump: dump struct
1872 *
1873 * Return value:
1874 * nothing
1875 **/
1876 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1877 {
1878 unsigned long start_addr, sdt_word;
1879 unsigned long lock_flags = 0;
1880 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1881 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1882 u32 num_entries, start_off, end_off;
1883 u32 bytes_to_copy, bytes_copied, rc;
1884 struct ipr_sdt *sdt;
1885 int i;
1886
1887 ENTER;
1888
1889 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1890
1891 if (ioa_cfg->sdt_state != GET_DUMP) {
1892 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1893 return;
1894 }
1895
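/* The mailbox register holds the address of the IOA's Smart Dump Table */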
1896 start_addr = readl(ioa_cfg->ioa_mailbox);
1897
1898 if (!ipr_sdt_is_fmt2(start_addr)) {
1899 dev_err(&ioa_cfg->pdev->dev,
1900 "Invalid dump table format: %lx\n", start_addr);
1901 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1902 return;
1903 }
1904
1905 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1906
1907 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1908
1909 /* Initialize the overall dump header */
1910 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1911 driver_dump->hdr.num_entries = 1;
1912 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1913 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1914 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1915 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1916
1917 ipr_dump_version_data(ioa_cfg, driver_dump);
1918 ipr_dump_location_data(ioa_cfg, driver_dump);
1919 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1920 ipr_dump_trace_data(ioa_cfg, driver_dump);
1921
1922 /* Update dump_header */
1923 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1924
1925 /* IOA Dump entry */
1926 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1927 ioa_dump->format = IPR_SDT_FMT2;
1928 ioa_dump->hdr.len = 0;
1929 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1930 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1931
1932 /* The first entries in the SDT are a list of dump addresses and
1933 lengths used to gather the real dump data. sdt points to the
1934 IOA-generated dump table; dump data will be extracted based on
1935 the entries in this table. */
1936 sdt = &ioa_dump->sdt;
1937
1938 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1939 sizeof(struct ipr_sdt) / sizeof(__be32));
1940
1941 /* Bail out unless the Smart Dump Table was fetched successfully and is ready to use */
1942 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1943 dev_err(&ioa_cfg->pdev->dev,
1944 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1945 rc, be32_to_cpu(sdt->hdr.state));
1946 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1947 ioa_cfg->sdt_state = DUMP_OBTAINED;
1948 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1949 return;
1950 }
1951
1952 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1953
1954 if (num_entries > IPR_NUM_SDT_ENTRIES)
1955 num_entries = IPR_NUM_SDT_ENTRIES;
1956
1957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1958
1959 for (i = 0; i < num_entries; i++) {
1960 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1961 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1962 break;
1963 }
1964
1965 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1966 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1967 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1968 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1969
1970 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1971 bytes_to_copy = end_off - start_off;
1972 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1973 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1974 continue;
1975 }
1976
1977 /* Copy data from adapter to driver buffers */
1978 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1979 bytes_to_copy);
1980
1981 ioa_dump->hdr.len += bytes_copied;
1982
1983 if (bytes_copied != bytes_to_copy) {
1984 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1985 break;
1986 }
1987 }
1988 }
1989 }
1990
1991 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1992
1993 /* Update dump_header */
1994 driver_dump->hdr.len += ioa_dump->hdr.len;
1995 wmb();
1996 ioa_cfg->sdt_state = DUMP_OBTAINED;
1997 LEAVE;
1998 }
1999
2000 #else
2001 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2002 #endif
2003
2004 /**
2005 * ipr_release_dump - Free adapter dump memory
2006 * @kref: kref struct
2007 *
2008 * Return value:
2009 * nothing
2010 **/
2011 static void ipr_release_dump(struct kref *kref)
2012 {
2013 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2014 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2015 unsigned long lock_flags = 0;
2016 int i;
2017
2018 ENTER;
2019 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2020 ioa_cfg->dump = NULL;
2021 ioa_cfg->sdt_state = INACTIVE;
2022 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2023
2024 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2025 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2026
2027 kfree(dump);
2028 LEAVE;
2029 }
2030
2031 /**
2032 * ipr_worker_thread - Worker thread
2033 * @data: ioa config struct
2034 *
2035 * Called at task level from a work thread. This function takes care
2036 * of adding and removing devices from the mid-layer as configuration
2037 * changes are detected by the adapter.
2038 *
2039 * Return value:
2040 * nothing
2041 **/
2042 static void ipr_worker_thread(void *data)
2043 {
2044 unsigned long lock_flags;
2045 struct ipr_resource_entry *res;
2046 struct scsi_device *sdev;
2047 struct ipr_dump *dump;
2048 struct ipr_ioa_cfg *ioa_cfg = data;
2049 u8 bus, target, lun;
2050 int did_work;
2051
2052 ENTER;
2053 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2054
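/* If an adapter dump has been requested, collect it first and then kick off an adapter reset once the dump has been obtained */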
2055 if (ioa_cfg->sdt_state == GET_DUMP) {
2056 dump = ioa_cfg->dump;
2057 if (!dump) {
2058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2059 return;
2060 }
2061 kref_get(&dump->kref);
2062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2063 ipr_get_ioa_dump(ioa_cfg, dump);
2064 kref_put(&dump->kref, ipr_release_dump);
2065
2066 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2067 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2068 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2069 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2070 return;
2071 }
2072
2073 restart:
2074 do {
2075 did_work = 0;
2076 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2078 return;
2079 }
2080
2081 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2082 if (res->del_from_ml && res->sdev) {
2083 did_work = 1;
2084 sdev = res->sdev;
2085 if (!scsi_device_get(sdev)) {
2086 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2088 scsi_remove_device(sdev);
2089 scsi_device_put(sdev);
2090 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2091 }
2092 break;
2093 }
2094 }
2095 } while(did_work);
2096
2097 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2098 if (res->add_to_ml) {
2099 bus = res->cfgte.res_addr.bus;
2100 target = res->cfgte.res_addr.target;
2101 lun = res->cfgte.res_addr.lun;
2102 res->add_to_ml = 0;
2103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2104 scsi_add_device(ioa_cfg->host, bus, target, lun);
2105 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2106 goto restart;
2107 }
2108 }
2109
2110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2111 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2112 LEAVE;
2113 }
2114
2115 #ifdef CONFIG_SCSI_IPR_TRACE
2116 /**
2117 * ipr_read_trace - Dump the adapter trace
2118 * @kobj: kobject struct
2119 * @buf: buffer
2120 * @off: offset
2121 * @count: buffer size
2122 *
2123 * Return value:
2124 * number of bytes printed to buffer
2125 **/
2126 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2127 loff_t off, size_t count)
2128 {
2129 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2130 struct Scsi_Host *shost = class_to_shost(cdev);
2131 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2132 unsigned long lock_flags = 0;
2133 int size = IPR_TRACE_SIZE;
2134 char *src = (char *)ioa_cfg->trace;
2135
2136 if (off > size)
2137 return 0;
2138 if (off + count > size) {
2139 size -= off;
2140 count = size;
2141 }
2142
2143 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2144 memcpy(buf, &src[off], count);
2145 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2146 return count;
2147 }
2148
2149 static struct bin_attribute ipr_trace_attr = {
2150 .attr = {
2151 .name = "trace",
2152 .mode = S_IRUGO,
2153 },
2154 .size = 0,
2155 .read = ipr_read_trace,
2156 };
2157 #endif
2158
2159 static const struct {
2160 enum ipr_cache_state state;
2161 char *name;
2162 } cache_state [] = {
2163 { CACHE_NONE, "none" },
2164 { CACHE_DISABLED, "disabled" },
2165 { CACHE_ENABLED, "enabled" }
2166 };
2167
2168 /**
2169 * ipr_show_write_caching - Show the write caching attribute
2170 * @class_dev: class device struct
2171 * @buf: buffer
2172 *
2173 * Return value:
2174 * number of bytes printed to buffer
2175 **/
2176 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2177 {
2178 struct Scsi_Host *shost = class_to_shost(class_dev);
2179 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2180 unsigned long lock_flags = 0;
2181 int i, len = 0;
2182
2183 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2184 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2185 if (cache_state[i].state == ioa_cfg->cache_state) {
2186 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2187 break;
2188 }
2189 }
2190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2191 return len;
2192 }
2193
2194
2195 /**
2196 * ipr_store_write_caching - Enable/disable adapter write cache
2197 * @class_dev: class_device struct
2198 * @buf: buffer
2199 * @count: buffer size
2200 *
2201 * This function will enable/disable adapter write cache.
2202 *
2203 * Return value:
2204 * count on success / other on failure
2205 **/
2206 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2207 const char *buf, size_t count)
2208 {
2209 struct Scsi_Host *shost = class_to_shost(class_dev);
2210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2211 unsigned long lock_flags = 0;
2212 enum ipr_cache_state new_state = CACHE_INVALID;
2213 int i;
2214
2215 if (!capable(CAP_SYS_ADMIN))
2216 return -EACCES;
2217 if (ioa_cfg->cache_state == CACHE_NONE)
2218 return -EINVAL;
2219
2220 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2221 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2222 new_state = cache_state[i].state;
2223 break;
2224 }
2225 }
2226
2227 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2228 return -EINVAL;
2229
2230 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2231 if (ioa_cfg->cache_state == new_state) {
2232 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2233 return count;
2234 }
2235
2236 ioa_cfg->cache_state = new_state;
2237 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2238 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2239 if (!ioa_cfg->in_reset_reload)
2240 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2242 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2243
2244 return count;
2245 }
2246
2247 static struct class_device_attribute ipr_ioa_cache_attr = {
2248 .attr = {
2249 .name = "write_cache",
2250 .mode = S_IRUGO | S_IWUSR,
2251 },
2252 .show = ipr_show_write_caching,
2253 .store = ipr_store_write_caching
2254 };
2255
2256 /**
2257 * ipr_show_fw_version - Show the firmware version
2258 * @class_dev: class device struct
2259 * @buf: buffer
2260 *
2261 * Return value:
2262 * number of bytes printed to buffer
2263 **/
2264 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2265 {
2266 struct Scsi_Host *shost = class_to_shost(class_dev);
2267 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2268 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2269 unsigned long lock_flags = 0;
2270 int len;
2271
2272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2273 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2274 ucode_vpd->major_release, ucode_vpd->card_type,
2275 ucode_vpd->minor_release[0],
2276 ucode_vpd->minor_release[1]);
2277 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2278 return len;
2279 }
2280
2281 static struct class_device_attribute ipr_fw_version_attr = {
2282 .attr = {
2283 .name = "fw_version",
2284 .mode = S_IRUGO,
2285 },
2286 .show = ipr_show_fw_version,
2287 };
2288
2289 /**
2290 * ipr_show_log_level - Show the adapter's error logging level
2291 * @class_dev: class device struct
2292 * @buf: buffer
2293 *
2294 * Return value:
2295 * number of bytes printed to buffer
2296 **/
2297 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2298 {
2299 struct Scsi_Host *shost = class_to_shost(class_dev);
2300 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2301 unsigned long lock_flags = 0;
2302 int len;
2303
2304 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2305 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2307 return len;
2308 }
2309
2310 /**
2311 * ipr_store_log_level - Change the adapter's error logging level
2312 * @class_dev: class device struct
2313 * @buf: buffer
2314 *
2315 * Return value:
2316 * number of bytes consumed from the buffer
2317 **/
2318 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2319 const char *buf, size_t count)
2320 {
2321 struct Scsi_Host *shost = class_to_shost(class_dev);
2322 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2323 unsigned long lock_flags = 0;
2324
2325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2326 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2328 return strlen(buf);
2329 }
2330
2331 static struct class_device_attribute ipr_log_level_attr = {
2332 .attr = {
2333 .name = "log_level",
2334 .mode = S_IRUGO | S_IWUSR,
2335 },
2336 .show = ipr_show_log_level,
2337 .store = ipr_store_log_level
2338 };
2339
2340 /**
2341 * ipr_store_diagnostics - IOA Diagnostics interface
2342 * @class_dev: class_device struct
2343 * @buf: buffer
2344 * @count: buffer size
2345 *
2346 * This function will reset the adapter and wait a reasonable
2347 * amount of time for any errors that the adapter might log.
2348 *
2349 * Return value:
2350 * count on success / other on failure
2351 **/
2352 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2353 const char *buf, size_t count)
2354 {
2355 struct Scsi_Host *shost = class_to_shost(class_dev);
2356 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2357 unsigned long lock_flags = 0;
2358 int rc = count;
2359
2360 if (!capable(CAP_SYS_ADMIN))
2361 return -EACCES;
2362
2363 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2364 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2365 ioa_cfg->errors_logged = 0;
2366 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2367
2368 if (ioa_cfg->in_reset_reload) {
2369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2370 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2371
2372 /* Wait for a second for any errors to be logged */
2373 msleep(1000);
2374 } else {
2375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2376 return -EIO;
2377 }
2378
2379 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2380 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2381 rc = -EIO;
2382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2383
2384 return rc;
2385 }
2386
2387 static struct class_device_attribute ipr_diagnostics_attr = {
2388 .attr = {
2389 .name = "run_diagnostics",
2390 .mode = S_IWUSR,
2391 },
2392 .store = ipr_store_diagnostics
2393 };
2394
2395 /**
2396 * ipr_show_adapter_state - Show the adapter's state
2397 * @class_dev: class device struct
2398 * @buf: buffer
2399 *
2400 * Return value:
2401 * number of bytes printed to buffer
2402 **/
2403 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2404 {
2405 struct Scsi_Host *shost = class_to_shost(class_dev);
2406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2407 unsigned long lock_flags = 0;
2408 int len;
2409
2410 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2411 if (ioa_cfg->ioa_is_dead)
2412 len = snprintf(buf, PAGE_SIZE, "offline\n");
2413 else
2414 len = snprintf(buf, PAGE_SIZE, "online\n");
2415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2416 return len;
2417 }
2418
2419 /**
2420 * ipr_store_adapter_state - Change adapter state
2421 * @class_dev: class_device struct
2422 * @buf: buffer
2423 * @count: buffer size
2424 *
2425 * This function will change the adapter's state.
2426 *
2427 * Return value:
2428 * count on success / other on failure
2429 **/
2430 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2431 const char *buf, size_t count)
2432 {
2433 struct Scsi_Host *shost = class_to_shost(class_dev);
2434 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2435 unsigned long lock_flags;
2436 int result = count;
2437
2438 if (!capable(CAP_SYS_ADMIN))
2439 return -EACCES;
2440
2441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2442 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2443 ioa_cfg->ioa_is_dead = 0;
2444 ioa_cfg->reset_retries = 0;
2445 ioa_cfg->in_ioa_bringdown = 0;
2446 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2447 }
2448 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2449 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2450
2451 return result;
2452 }
2453
2454 static struct class_device_attribute ipr_ioa_state_attr = {
2455 .attr = {
2456 .name = "state",
2457 .mode = S_IRUGO | S_IWUSR,
2458 },
2459 .show = ipr_show_adapter_state,
2460 .store = ipr_store_adapter_state
2461 };
2462
2463 /**
2464 * ipr_store_reset_adapter - Reset the adapter
2465 * @class_dev: class_device struct
2466 * @buf: buffer
2467 * @count: buffer size
2468 *
2469 * This function will reset the adapter.
2470 *
2471 * Return value:
2472 * count on success / other on failure
2473 **/
2474 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2475 const char *buf, size_t count)
2476 {
2477 struct Scsi_Host *shost = class_to_shost(class_dev);
2478 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2479 unsigned long lock_flags;
2480 int result = count;
2481
2482 if (!capable(CAP_SYS_ADMIN))
2483 return -EACCES;
2484
2485 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2486 if (!ioa_cfg->in_reset_reload)
2487 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2490
2491 return result;
2492 }
2493
2494 static struct class_device_attribute ipr_ioa_reset_attr = {
2495 .attr = {
2496 .name = "reset_host",
2497 .mode = S_IWUSR,
2498 },
2499 .store = ipr_store_reset_adapter
2500 };
2501
2502 /**
2503 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2504 * @buf_len: buffer length
2505 *
2506 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2507 * list to use for microcode download
2508 *
2509 * Return value:
2510 * pointer to sglist / NULL on failure
2511 **/
2512 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2513 {
2514 int sg_size, order, bsize_elem, num_elem, i, j;
2515 struct ipr_sglist *sglist;
2516 struct scatterlist *scatterlist;
2517 struct page *page;
2518
2519 /* Get the minimum size per scatter/gather element */
2520 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2521
2522 /* Get the actual size per element */
2523 order = get_order(sg_size);
2524
2525 /* Determine the actual number of bytes per element */
2526 bsize_elem = PAGE_SIZE * (1 << order);
2527
2528 /* Determine the actual number of sg entries needed */
2529 if (buf_len % bsize_elem)
2530 num_elem = (buf_len / bsize_elem) + 1;
2531 else
2532 num_elem = buf_len / bsize_elem;
2533
2534 /* Allocate a scatter/gather list for the DMA */
2535 sglist = kzalloc(sizeof(struct ipr_sglist) +
2536 (sizeof(struct scatterlist) * (num_elem - 1)),
2537 GFP_KERNEL);
2538
2539 if (sglist == NULL) {
2540 ipr_trace;
2541 return NULL;
2542 }
2543
2544 scatterlist = sglist->scatterlist;
2545
2546 sglist->order = order;
2547 sglist->num_sg = num_elem;
2548
2549 /* Allocate a bunch of sg elements */
2550 for (i = 0; i < num_elem; i++) {
2551 page = alloc_pages(GFP_KERNEL, order);
2552 if (!page) {
2553 ipr_trace;
2554
2555 /* Free up what we already allocated */
2556 for (j = i - 1; j >= 0; j--)
2557 __free_pages(scatterlist[j].page, order);
2558 kfree(sglist);
2559 return NULL;
2560 }
2561
2562 scatterlist[i].page = page;
2563 }
2564
2565 return sglist;
2566 }
2567
2568 /**
2569 * ipr_free_ucode_buffer - Frees a microcode download buffer
2570 * @sglist: scatter/gather list pointer
2571 *
2572 * Free a DMA'able ucode download buffer previously allocated with
2573 * ipr_alloc_ucode_buffer
2574 *
2575 * Return value:
2576 * nothing
2577 **/
2578 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2579 {
2580 int i;
2581
2582 for (i = 0; i < sglist->num_sg; i++)
2583 __free_pages(sglist->scatterlist[i].page, sglist->order);
2584
2585 kfree(sglist);
2586 }
2587
2588 /**
2589 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2590 * @sglist: scatter/gather list pointer
2591 * @buffer: buffer pointer
2592 * @len: buffer length
2593 *
2594 * Copy a microcode image from a user buffer into a buffer allocated by
2595 * ipr_alloc_ucode_buffer
2596 *
2597 * Return value:
2598 * 0 on success / other on failure
2599 **/
2600 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2601 u8 *buffer, u32 len)
2602 {
2603 int bsize_elem, i, result = 0;
2604 struct scatterlist *scatterlist;
2605 void *kaddr;
2606
2607 /* Determine the actual number of bytes per element */
2608 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2609
2610 scatterlist = sglist->scatterlist;
2611
2612 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2613 kaddr = kmap(scatterlist[i].page);
2614 memcpy(kaddr, buffer, bsize_elem);
2615 kunmap(scatterlist[i].page);
2616
2617 scatterlist[i].length = bsize_elem;
2618
2619 if (result != 0) {
2620 ipr_trace;
2621 return result;
2622 }
2623 }
2624
2625 if (len % bsize_elem) {
2626 kaddr = kmap(scatterlist[i].page);
2627 memcpy(kaddr, buffer, len % bsize_elem);
2628 kunmap(scatterlist[i].page);
2629
2630 scatterlist[i].length = len % bsize_elem;
2631 }
2632
2633 sglist->buffer_len = len;
2634 return result;
2635 }
2636
2637 /**
2638 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2639 * @ipr_cmd: ipr command struct
2640 * @sglist: scatter/gather list
2641 *
2642 * Builds a microcode download IOA data list (IOADL).
2643 *
2644 **/
2645 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2646 struct ipr_sglist *sglist)
2647 {
2648 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2649 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2650 struct scatterlist *scatterlist = sglist->scatterlist;
2651 int i;
2652
2653 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2654 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2655 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2656 ioarcb->write_ioadl_len =
2657 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2658
2659 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2660 ioadl[i].flags_and_data_len =
2661 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2662 ioadl[i].address =
2663 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2664 }
2665
2666 ioadl[i-1].flags_and_data_len |=
2667 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2668 }
2669
2670 /**
2671 * ipr_update_ioa_ucode - Update IOA's microcode
2672 * @ioa_cfg: ioa config struct
2673 * @sglist: scatter/gather list
2674 *
2675 * Initiate an adapter reset to update the IOA's microcode
2676 *
2677 * Return value:
2678 * 0 on success / -EIO on failure
2679 **/
2680 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2681 struct ipr_sglist *sglist)
2682 {
2683 unsigned long lock_flags;
2684
2685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2686
2687 if (ioa_cfg->ucode_sglist) {
2688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2689 dev_err(&ioa_cfg->pdev->dev,
2690 "Microcode download already in progress\n");
2691 return -EIO;
2692 }
2693
2694 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2695 sglist->num_sg, DMA_TO_DEVICE);
2696
2697 if (!sglist->num_dma_sg) {
2698 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2699 dev_err(&ioa_cfg->pdev->dev,
2700 "Failed to map microcode download buffer!\n");
2701 return -EIO;
2702 }
2703
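/* Stash the mapped image and kick off a reset; the reset sequence is expected to pick up ucode_sglist and perform the actual microcode download */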
2704 ioa_cfg->ucode_sglist = sglist;
2705 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2706 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2707 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2708
2709 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2710 ioa_cfg->ucode_sglist = NULL;
2711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2712 return 0;
2713 }
2714
2715 /**
2716 * ipr_store_update_fw - Update the firmware on the adapter
2717 * @class_dev: class_device struct
2718 * @buf: buffer
2719 * @count: buffer size
2720 *
2721 * This function will update the firmware on the adapter.
2722 *
2723 * Return value:
2724 * count on success / other on failure
2725 **/
2726 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2727 const char *buf, size_t count)
2728 {
2729 struct Scsi_Host *shost = class_to_shost(class_dev);
2730 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2731 struct ipr_ucode_image_header *image_hdr;
2732 const struct firmware *fw_entry;
2733 struct ipr_sglist *sglist;
2734 char fname[100];
2735 char *src;
2736 int len, result, dnld_size;
2737
2738 if (!capable(CAP_SYS_ADMIN))
2739 return -EACCES;
2740
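/* Copy the firmware file name from the sysfs buffer, dropping what is normally a trailing newline */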
2741 len = snprintf(fname, 99, "%s", buf);
2742 fname[len-1] = '\0';
2743
2744 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2745 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2746 return -EIO;
2747 }
2748
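/* The firmware image begins with a header; the download data follows it */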
2749 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2750
2751 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2752 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2753 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2754 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2755 release_firmware(fw_entry);
2756 return -EINVAL;
2757 }
2758
2759 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2760 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2761 sglist = ipr_alloc_ucode_buffer(dnld_size);
2762
2763 if (!sglist) {
2764 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2765 release_firmware(fw_entry);
2766 return -ENOMEM;
2767 }
2768
2769 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2770
2771 if (result) {
2772 dev_err(&ioa_cfg->pdev->dev,
2773 "Microcode buffer copy to DMA buffer failed\n");
2774 goto out;
2775 }
2776
2777 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2778
2779 if (!result)
2780 result = count;
2781 out:
2782 ipr_free_ucode_buffer(sglist);
2783 release_firmware(fw_entry);
2784 return result;
2785 }
2786
2787 static struct class_device_attribute ipr_update_fw_attr = {
2788 .attr = {
2789 .name = "update_fw",
2790 .mode = S_IWUSR,
2791 },
2792 .store = ipr_store_update_fw
2793 };
2794
2795 static struct class_device_attribute *ipr_ioa_attrs[] = {
2796 &ipr_fw_version_attr,
2797 &ipr_log_level_attr,
2798 &ipr_diagnostics_attr,
2799 &ipr_ioa_state_attr,
2800 &ipr_ioa_reset_attr,
2801 &ipr_update_fw_attr,
2802 &ipr_ioa_cache_attr,
2803 NULL,
2804 };
2805
2806 #ifdef CONFIG_SCSI_IPR_DUMP
2807 /**
2808 * ipr_read_dump - Dump the adapter
2809 * @kobj: kobject struct
2810 * @buf: buffer
2811 * @off: offset
2812 * @count: buffer size
2813 *
2814 * Return value:
2815 * number of bytes printed to buffer
2816 **/
2817 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2818 loff_t off, size_t count)
2819 {
2820 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2821 struct Scsi_Host *shost = class_to_shost(cdev);
2822 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2823 struct ipr_dump *dump;
2824 unsigned long lock_flags = 0;
2825 char *src;
2826 int len;
2827 size_t rc = count;
2828
2829 if (!capable(CAP_SYS_ADMIN))
2830 return -EACCES;
2831
2832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2833 dump = ioa_cfg->dump;
2834
2835 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2836 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2837 return 0;
2838 }
2839 kref_get(&dump->kref);
2840 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2841
2842 if (off > dump->driver_dump.hdr.len) {
2843 kref_put(&dump->kref, ipr_release_dump);
2844 return 0;
2845 }
2846
2847 if (off + count > dump->driver_dump.hdr.len) {
2848 count = dump->driver_dump.hdr.len - off;
2849 rc = count;
2850 }
2851
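/* The dump image is laid out as the driver dump (header plus entries), then the IOA dump header, then the paged IOA dump data; copy from each region in turn */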
2852 if (count && off < sizeof(dump->driver_dump)) {
2853 if (off + count > sizeof(dump->driver_dump))
2854 len = sizeof(dump->driver_dump) - off;
2855 else
2856 len = count;
2857 src = (u8 *)&dump->driver_dump + off;
2858 memcpy(buf, src, len);
2859 buf += len;
2860 off += len;
2861 count -= len;
2862 }
2863
2864 off -= sizeof(dump->driver_dump);
2865
2866 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2867 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2868 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2869 else
2870 len = count;
2871 src = (u8 *)&dump->ioa_dump + off;
2872 memcpy(buf, src, len);
2873 buf += len;
2874 off += len;
2875 count -= len;
2876 }
2877
2878 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2879
2880 while (count) {
2881 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2882 len = PAGE_ALIGN(off) - off;
2883 else
2884 len = count;
2885 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2886 src += off & ~PAGE_MASK;
2887 memcpy(buf, src, len);
2888 buf += len;
2889 off += len;
2890 count -= len;
2891 }
2892
2893 kref_put(&dump->kref, ipr_release_dump);
2894 return rc;
2895 }
2896
2897 /**
2898 * ipr_alloc_dump - Prepare for adapter dump
2899 * @ioa_cfg: ioa config struct
2900 *
2901 * Return value:
2902 * 0 on success / other on failure
2903 **/
2904 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2905 {
2906 struct ipr_dump *dump;
2907 unsigned long lock_flags = 0;
2908
2909 ENTER;
2910 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2911
2912 if (!dump) {
2913 ipr_err("Dump memory allocation failed\n");
2914 return -ENOMEM;
2915 }
2916
2917 kref_init(&dump->kref);
2918 dump->ioa_cfg = ioa_cfg;
2919
2920 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2921
2922 if (INACTIVE != ioa_cfg->sdt_state) {
2923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2924 kfree(dump);
2925 return 0;
2926 }
2927
2928 ioa_cfg->dump = dump;
2929 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2930 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2931 ioa_cfg->dump_taken = 1;
2932 schedule_work(&ioa_cfg->work_q);
2933 }
2934 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2935
2936 LEAVE;
2937 return 0;
2938 }
2939
2940 /**
2941 * ipr_free_dump - Free adapter dump memory
2942 * @ioa_cfg: ioa config struct
2943 *
2944 * Return value:
2945 * 0 on success / other on failure
2946 **/
2947 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2948 {
2949 struct ipr_dump *dump;
2950 unsigned long lock_flags = 0;
2951
2952 ENTER;
2953
2954 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2955 dump = ioa_cfg->dump;
2956 if (!dump) {
2957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2958 return 0;
2959 }
2960
2961 ioa_cfg->dump = NULL;
2962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2963
2964 kref_put(&dump->kref, ipr_release_dump);
2965
2966 LEAVE;
2967 return 0;
2968 }
2969
2970 /**
2971 * ipr_write_dump - Setup dump state of adapter
2972 * @kobj: kobject struct
2973 * @buf: buffer
2974 * @off: offset
2975 * @count: buffer size
2976 *
2977 * Return value:
2978 * count on success / other on failure
2979 **/
2980 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2981 loff_t off, size_t count)
2982 {
2983 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2984 struct Scsi_Host *shost = class_to_shost(cdev);
2985 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2986 int rc;
2987
2988 if (!capable(CAP_SYS_ADMIN))
2989 return -EACCES;
2990
2991 if (buf[0] == '1')
2992 rc = ipr_alloc_dump(ioa_cfg);
2993 else if (buf[0] == '0')
2994 rc = ipr_free_dump(ioa_cfg);
2995 else
2996 return -EINVAL;
2997
2998 if (rc)
2999 return rc;
3000 else
3001 return count;
3002 }
3003
3004 static struct bin_attribute ipr_dump_attr = {
3005 .attr = {
3006 .name = "dump",
3007 .mode = S_IRUSR | S_IWUSR,
3008 },
3009 .size = 0,
3010 .read = ipr_read_dump,
3011 .write = ipr_write_dump
3012 };
3013 #else
3014 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3015 #endif
3016
3017 /**
3018 * ipr_change_queue_depth - Change the device's queue depth
3019 * @sdev: scsi device struct
3020 * @qdepth: depth to set
3021 *
3022 * Return value:
3023 * actual depth set
3024 **/
3025 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3026 {
3027 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3028 return sdev->queue_depth;
3029 }
3030
3031 /**
3032 * ipr_change_queue_type - Change the device's queue type
3033 * @sdev: scsi device struct
3034 * @tag_type: type of tags to use
3035 *
3036 * Return value:
3037 * actual queue type set
3038 **/
3039 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3040 {
3041 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3042 struct ipr_resource_entry *res;
3043 unsigned long lock_flags = 0;
3044
3045 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3046 res = (struct ipr_resource_entry *)sdev->hostdata;
3047
3048 if (res) {
3049 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3050 /*
3051 * We don't bother quiescing the device here since the
3052 * adapter firmware does it for us.
3053 */
3054 scsi_set_tag_type(sdev, tag_type);
3055
3056 if (tag_type)
3057 scsi_activate_tcq(sdev, sdev->queue_depth);
3058 else
3059 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3060 } else
3061 tag_type = 0;
3062 } else
3063 tag_type = 0;
3064
3065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066 return tag_type;
3067 }
3068
3069 /**
3070 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3071 * @dev: device struct
3072 * @buf: buffer
3073 *
3074 * Return value:
3075 * number of bytes printed to buffer
3076 **/
3077 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3078 {
3079 struct scsi_device *sdev = to_scsi_device(dev);
3080 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3081 struct ipr_resource_entry *res;
3082 unsigned long lock_flags = 0;
3083 ssize_t len = -ENXIO;
3084
3085 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3086 res = (struct ipr_resource_entry *)sdev->hostdata;
3087 if (res)
3088 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3090 return len;
3091 }
3092
3093 static struct device_attribute ipr_adapter_handle_attr = {
3094 .attr = {
3095 .name = "adapter_handle",
3096 .mode = S_IRUSR,
3097 },
3098 .show = ipr_show_adapter_handle
3099 };
3100
3101 static struct device_attribute *ipr_dev_attrs[] = {
3102 &ipr_adapter_handle_attr,
3103 NULL,
3104 };
3105
3106 /**
3107 * ipr_biosparam - Return the HSC mapping
3108 * @sdev: scsi device struct
3109 * @block_device: block device pointer
3110 * @capacity: capacity of the device
3111 * @parm: Array containing returned HSC values.
3112 *
3113 * This function generates the HSC parms that fdisk uses.
3114 * We want to make sure we return something that places partitions
3115 * on 4k boundaries for best performance with the IOA.
3116 *
3117 * Return value:
3118 * 0 on success
3119 **/
3120 static int ipr_biosparam(struct scsi_device *sdev,
3121 struct block_device *block_device,
3122 sector_t capacity, int *parm)
3123 {
3124 int heads, sectors;
3125 sector_t cylinders;
3126
3127 heads = 128;
3128 sectors = 32;
3129
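/* 128 heads * 32 sectors gives 4096 sectors (2MB with 512 byte sectors) per cylinder, so cylinder-aligned partitions stay 4k aligned */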
3130 cylinders = capacity;
3131 sector_div(cylinders, (128 * 32));
3132
3133 /* return result */
3134 parm[0] = heads;
3135 parm[1] = sectors;
3136 parm[2] = cylinders;
3137
3138 return 0;
3139 }
3140
3141 /**
3142 * ipr_slave_destroy - Unconfigure a SCSI device
3143 * @sdev: scsi device struct
3144 *
3145 * Return value:
3146 * nothing
3147 **/
3148 static void ipr_slave_destroy(struct scsi_device *sdev)
3149 {
3150 struct ipr_resource_entry *res;
3151 struct ipr_ioa_cfg *ioa_cfg;
3152 unsigned long lock_flags = 0;
3153
3154 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3155
3156 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3157 res = (struct ipr_resource_entry *) sdev->hostdata;
3158 if (res) {
3159 sdev->hostdata = NULL;
3160 res->sdev = NULL;
3161 }
3162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3163 }
3164
3165 /**
3166 * ipr_slave_configure - Configure a SCSI device
3167 * @sdev: scsi device struct
3168 *
3169 * This function configures the specified scsi device.
3170 *
3171 * Return value:
3172 * 0 on success
3173 **/
3174 static int ipr_slave_configure(struct scsi_device *sdev)
3175 {
3176 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3177 struct ipr_resource_entry *res;
3178 unsigned long lock_flags = 0;
3179
3180 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3181 res = sdev->hostdata;
3182 if (res) {
3183 if (ipr_is_af_dasd_device(res))
3184 sdev->type = TYPE_RAID;
3185 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3186 sdev->scsi_level = 4;
3187 sdev->no_uld_attach = 1;
3188 }
3189 if (ipr_is_vset_device(res)) {
3190 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3191 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3192 }
3193 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3194 sdev->allow_restart = 1;
3195 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3196 }
3197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3198 return 0;
3199 }
3200
3201 /**
3202 * ipr_slave_alloc - Prepare for commands to a device.
3203 * @sdev: scsi device struct
3204 *
3205 * This function saves a pointer to the resource entry
3206 * in the scsi device struct if the device exists. We
3207 * can then use this pointer in ipr_queuecommand when
3208 * handling new commands.
3209 *
3210 * Return value:
3211 * 0 on success / -ENXIO if device does not exist
3212 **/
3213 static int ipr_slave_alloc(struct scsi_device *sdev)
3214 {
3215 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3216 struct ipr_resource_entry *res;
3217 unsigned long lock_flags;
3218 int rc = -ENXIO;
3219
3220 sdev->hostdata = NULL;
3221
3222 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3223
3224 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3225 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3226 (res->cfgte.res_addr.target == sdev->id) &&
3227 (res->cfgte.res_addr.lun == sdev->lun)) {
3228 res->sdev = sdev;
3229 res->add_to_ml = 0;
3230 res->in_erp = 0;
3231 sdev->hostdata = res;
3232 if (!ipr_is_naca_model(res))
3233 res->needs_sync_complete = 1;
3234 rc = 0;
3235 break;
3236 }
3237 }
3238
3239 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3240
3241 return rc;
3242 }
3243
3244 /**
3245 * __ipr_eh_host_reset - Reset the host adapter
3246 * @scsi_cmd: scsi command struct
3247 *
3248 * Return value:
3249 * SUCCESS / FAILED
3250 **/
3251 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3252 {
3253 struct ipr_ioa_cfg *ioa_cfg;
3254 int rc;
3255
3256 ENTER;
3257 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3258
3259 dev_err(&ioa_cfg->pdev->dev,
3260 "Adapter being reset as a result of error recovery.\n");
3261
3262 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3263 ioa_cfg->sdt_state = GET_DUMP;
3264
3265 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3266
3267 LEAVE;
3268 return rc;
3269 }
3270
3271 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3272 {
3273 int rc;
3274
3275 spin_lock_irq(cmd->device->host->host_lock);
3276 rc = __ipr_eh_host_reset(cmd);
3277 spin_unlock_irq(cmd->device->host->host_lock);
3278
3279 return rc;
3280 }
3281
3282 /**
3283 * ipr_device_reset - Reset the device
3284 * @ioa_cfg: ioa config struct
3285 * @res: resource entry struct
3286 *
3287 * This function issues a device reset to the affected device.
3288 * If the device is a SCSI device, a LUN reset will be sent
3289 * to the device first. If that does not work, a target reset
3290 * will be sent.
3291 *
3292 * Return value:
3293 * 0 on success / non-zero on failure
3294 **/
3295 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3296 struct ipr_resource_entry *res)
3297 {
3298 struct ipr_cmnd *ipr_cmd;
3299 struct ipr_ioarcb *ioarcb;
3300 struct ipr_cmd_pkt *cmd_pkt;
3301 u32 ioasc;
3302
3303 ENTER;
3304 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3305 ioarcb = &ipr_cmd->ioarcb;
3306 cmd_pkt = &ioarcb->cmd_pkt;
3307
3308 ioarcb->res_handle = res->cfgte.res_handle;
3309 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3310 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3311
3312 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3313 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3314 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3315
3316 LEAVE;
3317 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3318 }
3319
3320 /**
3321 * __ipr_eh_dev_reset - Reset the device
3322 * @scsi_cmd: scsi command struct
3323 *
3324 * This function issues a device reset to the affected device.
3325 * A LUN reset will be sent to the device first. If that does
3326 * not work, a target reset will be sent.
3327 *
3328 * Return value:
3329 * SUCCESS / FAILED
3330 **/
3331 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3332 {
3333 struct ipr_cmnd *ipr_cmd;
3334 struct ipr_ioa_cfg *ioa_cfg;
3335 struct ipr_resource_entry *res;
3336 int rc;
3337
3338 ENTER;
3339 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3340 res = scsi_cmd->device->hostdata;
3341
3342 if (!res)
3343 return FAILED;
3344
3345 /*
3346 * If we are currently going through reset/reload, return FAILED. This will force the
3347 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3348 * reset to complete.
3349 */
3350 if (ioa_cfg->in_reset_reload)
3351 return FAILED;
3352 if (ioa_cfg->ioa_is_dead)
3353 return FAILED;
3354
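/* Route any commands outstanding to this device through the SCSI error handling done routine so they are cleaned up once the reset completes */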
3355 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3356 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3357 if (ipr_cmd->scsi_cmd)
3358 ipr_cmd->done = ipr_scsi_eh_done;
3359 }
3360 }
3361
3362 res->resetting_device = 1;
3363 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3364 rc = ipr_device_reset(ioa_cfg, res);
3365 res->resetting_device = 0;
3366
3367 LEAVE;
3368 return (rc ? FAILED : SUCCESS);
3369 }
3370
3371 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3372 {
3373 int rc;
3374
3375 spin_lock_irq(cmd->device->host->host_lock);
3376 rc = __ipr_eh_dev_reset(cmd);
3377 spin_unlock_irq(cmd->device->host->host_lock);
3378
3379 return rc;
3380 }
3381
3382 /**
3383 * ipr_bus_reset_done - Op done function for bus reset.
3384 * @ipr_cmd: ipr command struct
3385 *
3386 * This function is the op done function for a bus reset
3387 *
3388 * Return value:
3389 * none
3390 **/
3391 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3392 {
3393 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3394 struct ipr_resource_entry *res;
3395
3396 ENTER;
3397 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3398 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3399 sizeof(res->cfgte.res_handle))) {
3400 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3401 break;
3402 }
3403 }
3404
3405 /*
3406 * If abort has not completed, indicate the reset has, else call the
3407 * abort's done function to wake the sleeping eh thread
3408 */
3409 if (ipr_cmd->sibling->sibling)
3410 ipr_cmd->sibling->sibling = NULL;
3411 else
3412 ipr_cmd->sibling->done(ipr_cmd->sibling);
3413
3414 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3415 LEAVE;
3416 }
3417
3418 /**
3419 * ipr_abort_timeout - An abort task has timed out
3420 * @ipr_cmd: ipr command struct
3421 *
3422 * This function handles when an abort task times out. If this
3423 * happens we issue a bus reset since we have resources tied
3424 * up that must be freed before returning to the midlayer.
3425 *
3426 * Return value:
3427 * none
3428 **/
3429 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3430 {
3431 struct ipr_cmnd *reset_cmd;
3432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3433 struct ipr_cmd_pkt *cmd_pkt;
3434 unsigned long lock_flags = 0;
3435
3436 ENTER;
3437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3438 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440 return;
3441 }
3442
3443 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3444 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3445 ipr_cmd->sibling = reset_cmd;
3446 reset_cmd->sibling = ipr_cmd;
3447 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3448 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3449 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3450 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3451 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3452
3453 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3454 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455 LEAVE;
3456 }
3457
3458 /**
3459 * ipr_cancel_op - Cancel specified op
3460 * @scsi_cmd: scsi command struct
3461 *
3462 * This function cancels the specified op.
3463 *
3464 * Return value:
3465 * SUCCESS / FAILED
3466 **/
3467 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3468 {
3469 struct ipr_cmnd *ipr_cmd;
3470 struct ipr_ioa_cfg *ioa_cfg;
3471 struct ipr_resource_entry *res;
3472 struct ipr_cmd_pkt *cmd_pkt;
3473 u32 ioasc;
3474 int op_found = 0;
3475
3476 ENTER;
3477 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3478 res = scsi_cmd->device->hostdata;
3479
3480 /* If we are currently going through reset/reload, return failed.
3481 * This will force the mid-layer to call ipr_eh_host_reset,
3482 * which will then go to sleep and wait for the reset to complete
3483 */
3484 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3485 return FAILED;
3486 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3487 return FAILED;
3488
3489 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3490 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3491 ipr_cmd->done = ipr_scsi_eh_done;
3492 op_found = 1;
3493 break;
3494 }
3495 }
3496
3497 if (!op_found)
3498 return SUCCESS;
3499
3500 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3501 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3502 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3503 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3504 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3505 ipr_cmd->u.sdev = scsi_cmd->device;
3506
3507 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3508 scsi_cmd->cmnd[0]);
3509 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3510 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3511
3512 /*
3513 * If the abort task timed out and we sent a bus reset, we will get
3514 * one of the following responses to the abort
3515 */
3516 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3517 ioasc = 0;
3518 ipr_trace;
3519 }
3520
3521 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3522 if (!ipr_is_naca_model(res))
3523 res->needs_sync_complete = 1;
3524
3525 LEAVE;
3526 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3527 }
3528
3529 /**
3530 * ipr_eh_abort - Abort a single op
3531 * @scsi_cmd: scsi command struct
3532 *
3533 * Return value:
3534 * SUCCESS / FAILED
3535 **/
3536 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3537 {
3538 unsigned long flags;
3539 int rc;
3540
3541 ENTER;
3542
3543 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3544 rc = ipr_cancel_op(scsi_cmd);
3545 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3546
3547 LEAVE;
3548 return rc;
3549 }
3550
3551 /**
3552 * ipr_handle_other_interrupt - Handle "other" interrupts
3553 * @ioa_cfg: ioa config struct
3554 * @int_reg: interrupt register
3555 *
3556 * Return value:
3557 * IRQ_NONE / IRQ_HANDLED
3558 **/
3559 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3560 volatile u32 int_reg)
3561 {
3562 irqreturn_t rc = IRQ_HANDLED;
3563
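/* A transition-to-operational interrupt means the adapter has come back up, so resume the in-progress reset job. Other interrupts here indicate an adapter error (unit check or permanent failure) and trigger an adapter reset. */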
3564 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3565 /* Mask the interrupt */
3566 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3567
3568 /* Clear the interrupt */
3569 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3570 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3571
3572 list_del(&ioa_cfg->reset_cmd->queue);
3573 del_timer(&ioa_cfg->reset_cmd->timer);
3574 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3575 } else {
3576 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3577 ioa_cfg->ioa_unit_checked = 1;
3578 else
3579 dev_err(&ioa_cfg->pdev->dev,
3580 "Permanent IOA failure. 0x%08X\n", int_reg);
3581
3582 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3583 ioa_cfg->sdt_state = GET_DUMP;
3584
3585 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3586 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3587 }
3588
3589 return rc;
3590 }
3591
3592 /**
3593 * ipr_isr - Interrupt service routine
3594 * @irq: irq number
3595 * @devp: pointer to ioa config struct
3596 * @regs: pt_regs struct
3597 *
3598 * Return value:
3599 * IRQ_NONE / IRQ_HANDLED
3600 **/
3601 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3602 {
3603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3604 unsigned long lock_flags = 0;
3605 volatile u32 int_reg, int_mask_reg;
3606 u32 ioasc;
3607 u16 cmd_index;
3608 struct ipr_cmnd *ipr_cmd;
3609 irqreturn_t rc = IRQ_NONE;
3610
3611 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3612
3613 /* If interrupts are disabled, ignore the interrupt */
3614 if (!ioa_cfg->allow_interrupts) {
3615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616 return IRQ_NONE;
3617 }
3618
3619 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3620 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3621
3622 /* If an interrupt on the adapter did not occur, ignore it */
3623 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3624 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3625 return IRQ_NONE;
3626 }
3627
3628 while (1) {
3629 ipr_cmd = NULL;
3630
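/* Drain completed responses from the host RRQ; the toggle bit flips each time the queue wraps, telling us which entries are new */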
3631 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3632 ioa_cfg->toggle_bit) {
3633
3634 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3635 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3636
3637 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3638 ioa_cfg->errors_logged++;
3639 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3640
3641 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3642 ioa_cfg->sdt_state = GET_DUMP;
3643
3644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3646 return IRQ_HANDLED;
3647 }
3648
3649 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3650
3651 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3652
3653 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3654
3655 list_del(&ipr_cmd->queue);
3656 del_timer(&ipr_cmd->timer);
3657 ipr_cmd->done(ipr_cmd);
3658
3659 rc = IRQ_HANDLED;
3660
3661 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3662 ioa_cfg->hrrq_curr++;
3663 } else {
3664 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3665 ioa_cfg->toggle_bit ^= 1u;
3666 }
3667 }
3668
3669 if (ipr_cmd != NULL) {
3670 /* Clear the PCI interrupt */
3671 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3672 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3673 } else
3674 break;
3675 }
3676
3677 if (unlikely(rc == IRQ_NONE))
3678 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3679
3680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3681 return rc;
3682 }
3683
3684 /**
3685 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3686 * @ioa_cfg: ioa config struct
3687 * @ipr_cmd: ipr command struct
3688 *
3689 * Return value:
3690 * 0 on success / -1 on failure
3691 **/
3692 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3693 struct ipr_cmnd *ipr_cmd)
3694 {
3695 int i;
3696 struct scatterlist *sglist;
3697 u32 length;
3698 u32 ioadl_flags = 0;
3699 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3700 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3701 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3702
3703 length = scsi_cmd->request_bufflen;
3704
3705 if (length == 0)
3706 return 0;
3707
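/*
 * Two mapping paths: a scatter/gather request is mapped with pci_map_sg()
 * and gets one IOADL descriptor per mapped segment, while a single linear
 * buffer is mapped with pci_map_single() and described by one IOADL entry.
 * In both cases the final descriptor is tagged with IPR_IOADL_FLAGS_LAST.
 */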
3708 if (scsi_cmd->use_sg) {
3709 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3710 scsi_cmd->request_buffer,
3711 scsi_cmd->use_sg,
3712 scsi_cmd->sc_data_direction);
3713
3714 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3715 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3716 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3717 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3718 ioarcb->write_ioadl_len =
3719 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3720 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3721 ioadl_flags = IPR_IOADL_FLAGS_READ;
3722 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3723 ioarcb->read_ioadl_len =
3724 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3725 }
3726
3727 sglist = scsi_cmd->request_buffer;
3728
3729 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3730 ioadl[i].flags_and_data_len =
3731 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3732 ioadl[i].address =
3733 cpu_to_be32(sg_dma_address(&sglist[i]));
3734 }
3735
3736 if (likely(ipr_cmd->dma_use_sg)) {
3737 ioadl[i-1].flags_and_data_len |=
3738 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3739 return 0;
3740 } else
3741 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3742 } else {
3743 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3744 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3745 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3746 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3747 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3748 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3749 ioadl_flags = IPR_IOADL_FLAGS_READ;
3750 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3751 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3752 }
3753
3754 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3755 scsi_cmd->request_buffer, length,
3756 scsi_cmd->sc_data_direction);
3757
3758 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3759 ipr_cmd->dma_use_sg = 1;
3760 ioadl[0].flags_and_data_len =
3761 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3762 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3763 return 0;
3764 } else
3765 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3766 }
3767
3768 return -1;
3769 }
3770
3771 /**
3772 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3773 * @scsi_cmd: scsi command struct
3774 *
3775 * Return value:
3776 * task attributes
3777 **/
3778 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3779 {
3780 u8 tag[2];
3781 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3782
3783 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3784 switch (tag[0]) {
3785 case MSG_SIMPLE_TAG:
3786 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3787 break;
3788 case MSG_HEAD_TAG:
3789 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3790 break;
3791 case MSG_ORDERED_TAG:
3792 rc = IPR_FLAGS_LO_ORDERED_TASK;
3793 break;
3794 }
3795 }
3796
3797 return rc;
3798 }
3799
3800 /**
3801 * ipr_erp_done - Process completion of ERP for a device
3802 * @ipr_cmd: ipr command struct
3803 *
3804 * This function copies the sense buffer into the scsi_cmd
3805 * struct and pushes the scsi_done function.
3806 *
3807 * Return value:
3808 * nothing
3809 **/
3810 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3811 {
3812 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3813 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3814 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3815 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3816
3817 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3818 scsi_cmd->result |= (DID_ERROR << 16);
3819 scmd_printk(KERN_ERR, scsi_cmd,
3820 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3821 } else {
3822 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3823 SCSI_SENSE_BUFFERSIZE);
3824 }
3825
3826 if (res) {
3827 if (!ipr_is_naca_model(res))
3828 res->needs_sync_complete = 1;
3829 res->in_erp = 0;
3830 }
3831 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3832 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3833 scsi_cmd->scsi_done(scsi_cmd);
3834 }
3835
3836 /**
3837 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3838 * @ipr_cmd: ipr command struct
3839 *
3840 * Return value:
3841 * none
3842 **/
3843 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3844 {
3845 struct ipr_ioarcb *ioarcb;
3846 struct ipr_ioasa *ioasa;
3847
3848 ioarcb = &ipr_cmd->ioarcb;
3849 ioasa = &ipr_cmd->ioasa;
3850
3851 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3852 ioarcb->write_data_transfer_length = 0;
3853 ioarcb->read_data_transfer_length = 0;
3854 ioarcb->write_ioadl_len = 0;
3855 ioarcb->read_ioadl_len = 0;
3856 ioasa->ioasc = 0;
3857 ioasa->residual_data_len = 0;
3858 }
3859
3860 /**
3861 * ipr_erp_request_sense - Send request sense to a device
3862 * @ipr_cmd: ipr command struct
3863 *
3864 * This function sends a request sense to a device as a result
3865 * of a check condition.
3866 *
3867 * Return value:
3868 * nothing
3869 **/
3870 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3871 {
3872 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3873 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3874
3875 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3876 ipr_erp_done(ipr_cmd);
3877 return;
3878 }
3879
3880 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3881
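/*
 * Build a REQUEST SENSE CDB that DMAs the sense data into this command
 * block's own sense buffer; ipr_erp_done() later copies it into the
 * midlayer scsi_cmnd.
 */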
3882 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3883 cmd_pkt->cdb[0] = REQUEST_SENSE;
3884 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3885 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3886 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3887 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3888
3889 ipr_cmd->ioadl[0].flags_and_data_len =
3890 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3891 ipr_cmd->ioadl[0].address =
3892 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3893
3894 ipr_cmd->ioarcb.read_ioadl_len =
3895 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3896 ipr_cmd->ioarcb.read_data_transfer_length =
3897 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3898
3899 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3900 IPR_REQUEST_SENSE_TIMEOUT * 2);
3901 }
3902
3903 /**
3904 * ipr_erp_cancel_all - Send cancel all to a device
3905 * @ipr_cmd: ipr command struct
3906 *
3907 * This function sends a cancel all to a device to clear the
3908 * queue. If we are running TCQ on the device, QERR is set to 1,
3909 * which means all outstanding ops have been dropped on the floor.
3910 * Cancel all will return them to us.
3911 *
3912 * Return value:
3913 * nothing
3914 **/
3915 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3916 {
3917 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3918 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3919 struct ipr_cmd_pkt *cmd_pkt;
3920
3921 res->in_erp = 1;
3922
3923 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3924
3925 if (!scsi_get_tag_type(scsi_cmd->device)) {
3926 ipr_erp_request_sense(ipr_cmd);
3927 return;
3928 }
3929
3930 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3931 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3932 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3933
3934 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3935 IPR_CANCEL_ALL_TIMEOUT);
3936 }
3937
3938 /**
3939 * ipr_dump_ioasa - Dump contents of IOASA
3940 * @ioa_cfg: ioa config struct
3941 * @ipr_cmd: ipr command struct
3942 * @res: resource entry struct
3943 *
3944 * This function is invoked by the interrupt handler when ops
3945 * fail. It will log the IOASA if appropriate. Only called
3946 * for GPDD ops.
3947 *
3948 * Return value:
3949 * none
3950 **/
3951 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3952 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
3953 {
3954 int i;
3955 u16 data_len;
3956 u32 ioasc;
3957 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3958 __be32 *ioasa_data = (__be32 *)ioasa;
3959 int error_index;
3960
3961 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3962
3963 if (0 == ioasc)
3964 return;
3965
3966 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3967 return;
3968
3969 error_index = ipr_get_error(ioasc);
3970
3971 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3972 /* Don't log an error if the IOA already logged one */
3973 if (ioasa->ilid != 0)
3974 return;
3975
3976 if (ipr_error_table[error_index].log_ioasa == 0)
3977 return;
3978 }
3979
3980 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
3981
3982 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3983 data_len = sizeof(struct ipr_ioasa);
3984 else
3985 data_len = be16_to_cpu(ioasa->ret_stat_len);
3986
3987 ipr_err("IOASA Dump:\n");
3988
3989 for (i = 0; i < data_len / 4; i += 4) {
3990 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3991 be32_to_cpu(ioasa_data[i]),
3992 be32_to_cpu(ioasa_data[i+1]),
3993 be32_to_cpu(ioasa_data[i+2]),
3994 be32_to_cpu(ioasa_data[i+3]));
3995 }
3996 }
3997
3998 /**
3999 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4000 * @ipr_cmd: ipr command struct containing the IOASA and the
4001 * scsi_cmd whose sense buffer is filled in
4002 *
4003 * Return value:
4004 * none
4005 **/
4006 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4007 {
4008 u32 failing_lba;
4009 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4010 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4011 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4012 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4013
4014 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4015
4016 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4017 return;
4018
4019 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4020
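/*
 * Volume set devices reporting a media "do not reallocate" error with a
 * failing LBA wider than 32 bits get descriptor format sense data
 * (response code 0x72) carrying the full 64-bit LBA; everything else
 * gets fixed format sense data (response code 0x70).
 */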
4021 if (ipr_is_vset_device(res) &&
4022 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4023 ioasa->u.vset.failing_lba_hi != 0) {
4024 sense_buf[0] = 0x72;
4025 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4026 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4027 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4028
4029 sense_buf[7] = 12;
4030 sense_buf[8] = 0;
4031 sense_buf[9] = 0x0A;
4032 sense_buf[10] = 0x80;
4033
4034 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4035
4036 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4037 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4038 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4039 sense_buf[15] = failing_lba & 0x000000ff;
4040
4041 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4042
4043 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4044 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4045 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4046 sense_buf[19] = failing_lba & 0x000000ff;
4047 } else {
4048 sense_buf[0] = 0x70;
4049 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4050 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4051 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4052
4053 /* Illegal request */
4054 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4055 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4056 sense_buf[7] = 10; /* additional length */
4057
4058 /* IOARCB was in error */
4059 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4060 sense_buf[15] = 0xC0;
4061 else /* Parameter data was invalid */
4062 sense_buf[15] = 0x80;
4063
4064 sense_buf[16] =
4065 ((IPR_FIELD_POINTER_MASK &
4066 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4067 sense_buf[17] =
4068 (IPR_FIELD_POINTER_MASK &
4069 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4070 } else {
4071 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4072 if (ipr_is_vset_device(res))
4073 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4074 else
4075 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4076
4077 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4078 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4079 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4080 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4081 sense_buf[6] = failing_lba & 0x000000ff;
4082 }
4083
4084 sense_buf[7] = 6; /* additional length */
4085 }
4086 }
4087 }
4088
4089 /**
4090 * ipr_get_autosense - Copy autosense data to sense buffer
4091 * @ipr_cmd: ipr command struct
4092 *
4093 * This function copies the autosense buffer to the buffer
4094 * in the scsi_cmd, if there is autosense available.
4095 *
4096 * Return value:
4097 * 1 if autosense was available / 0 if not
4098 **/
4099 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4100 {
4101 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4102
4103 if ((be32_to_cpu(ioasa->ioasc_specific) &
4104 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4105 return 0;
4106
4107 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4108 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4109 SCSI_SENSE_BUFFERSIZE));
4110 return 1;
4111 }
4112
4113 /**
4114 * ipr_erp_start - Process an error response for a SCSI op
4115 * @ioa_cfg: ioa config struct
4116 * @ipr_cmd: ipr command struct
4117 *
4118 * This function determines whether or not to initiate ERP
4119 * on the affected device.
4120 *
4121 * Return value:
4122 * nothing
4123 **/
4124 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4125 struct ipr_cmnd *ipr_cmd)
4126 {
4127 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4128 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4129 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4130
4131 if (!res) {
4132 ipr_scsi_eh_done(ipr_cmd);
4133 return;
4134 }
4135
4136 if (ipr_is_gscsi(res))
4137 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4138 else
4139 ipr_gen_sense(ipr_cmd);
4140
4141 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4142 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4143 if (ipr_is_naca_model(res))
4144 scsi_cmd->result |= (DID_ABORT << 16);
4145 else
4146 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4147 break;
4148 case IPR_IOASC_IR_RESOURCE_HANDLE:
4149 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4150 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4151 break;
4152 case IPR_IOASC_HW_SEL_TIMEOUT:
4153 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4154 if (!ipr_is_naca_model(res))
4155 res->needs_sync_complete = 1;
4156 break;
4157 case IPR_IOASC_SYNC_REQUIRED:
4158 if (!res->in_erp)
4159 res->needs_sync_complete = 1;
4160 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4161 break;
4162 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4163 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4164 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4165 break;
4166 case IPR_IOASC_BUS_WAS_RESET:
4167 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4168 /*
4169 * Report the bus reset and ask for a retry. The device
4170 * will give CC/UA the next command.
4171 */
4172 if (!res->resetting_device)
4173 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4174 scsi_cmd->result |= (DID_ERROR << 16);
4175 if (!ipr_is_naca_model(res))
4176 res->needs_sync_complete = 1;
4177 break;
4178 case IPR_IOASC_HW_DEV_BUS_STATUS:
4179 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4180 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4181 if (!ipr_get_autosense(ipr_cmd)) {
4182 if (!ipr_is_naca_model(res)) {
4183 ipr_erp_cancel_all(ipr_cmd);
4184 return;
4185 }
4186 }
4187 }
4188 if (!ipr_is_naca_model(res))
4189 res->needs_sync_complete = 1;
4190 break;
4191 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4192 break;
4193 default:
4194 scsi_cmd->result |= (DID_ERROR << 16);
4195 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4196 res->needs_sync_complete = 1;
4197 break;
4198 }
4199
4200 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4201 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4202 scsi_cmd->scsi_done(scsi_cmd);
4203 }
4204
4205 /**
4206 * ipr_scsi_done - mid-layer done function
4207 * @ipr_cmd: ipr command struct
4208 *
4209 * This function is invoked by the interrupt handler for
4210 * ops generated by the SCSI mid-layer
4211 *
4212 * Return value:
4213 * none
4214 **/
4215 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4216 {
4217 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4218 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4219 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4220
4221 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4222
4223 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4224 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4225 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4226 scsi_cmd->scsi_done(scsi_cmd);
4227 } else
4228 ipr_erp_start(ioa_cfg, ipr_cmd);
4229 }
4230
4231 /**
4232 * ipr_queuecommand - Queue a mid-layer request
4233 * @scsi_cmd: scsi command struct
4234 * @done: done function
4235 *
4236 * This function queues a request generated by the mid-layer.
4237 *
4238 * Return value:
4239 * 0 on success
4240 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4241 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4242 **/
4243 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4244 void (*done) (struct scsi_cmnd *))
4245 {
4246 struct ipr_ioa_cfg *ioa_cfg;
4247 struct ipr_resource_entry *res;
4248 struct ipr_ioarcb *ioarcb;
4249 struct ipr_cmnd *ipr_cmd;
4250 int rc = 0;
4251
4252 scsi_cmd->scsi_done = done;
4253 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4254 res = scsi_cmd->device->hostdata;
4255 scsi_cmd->result = (DID_OK << 16);
4256
4257 /*
4258 * We are currently blocking all devices due to a host reset
4259 * We have told the host to stop giving us new requests, but
4260 * ERP ops don't count. FIXME
4261 */
4262 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4263 return SCSI_MLQUEUE_HOST_BUSY;
4264
4265 /*
4266 * FIXME - Create scsi_set_host_offline interface
4267 * and the ioa_is_dead check can be removed
4268 */
4269 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4270 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4271 scsi_cmd->result = (DID_NO_CONNECT << 16);
4272 scsi_cmd->scsi_done(scsi_cmd);
4273 return 0;
4274 }
4275
4276 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4277 ioarcb = &ipr_cmd->ioarcb;
4278 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4279
4280 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4281 ipr_cmd->scsi_cmd = scsi_cmd;
4282 ioarcb->res_handle = res->cfgte.res_handle;
4283 ipr_cmd->done = ipr_scsi_done;
4284 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4285
4286 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4287 if (scsi_cmd->underflow == 0)
4288 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4289
4290 if (res->needs_sync_complete) {
4291 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4292 res->needs_sync_complete = 0;
4293 }
4294
4295 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4296 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4297 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4298 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4299 }
4300
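/*
 * Vendor specific opcodes (0xC0 and above) are sent as IOA commands
 * rather than pass-through SCSI CDBs, unless the target is a generic
 * SCSI device and the op is not IPR_QUERY_RSRC_STATE.
 */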
4301 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4302 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4303 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4304
4305 if (likely(rc == 0))
4306 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4307
4308 if (likely(rc == 0)) {
4309 mb();
4310 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4311 ioa_cfg->regs.ioarrin_reg);
4312 } else {
4313 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4314 return SCSI_MLQUEUE_HOST_BUSY;
4315 }
4316
4317 return 0;
4318 }
4319
4320 /**
4321 * ipr_ioa_info - Get information about the card/driver
4322 * @host: scsi host struct
4323 *
4324 * Return value:
4325 * pointer to buffer with description string
4326 **/
4327 static const char * ipr_ioa_info(struct Scsi_Host *host)
4328 {
4329 static char buffer[512];
4330 struct ipr_ioa_cfg *ioa_cfg;
4331 unsigned long lock_flags = 0;
4332
4333 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4334
4335 spin_lock_irqsave(host->host_lock, lock_flags);
4336 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4337 spin_unlock_irqrestore(host->host_lock, lock_flags);
4338
4339 return buffer;
4340 }
4341
4342 static struct scsi_host_template driver_template = {
4343 .module = THIS_MODULE,
4344 .name = "IPR",
4345 .info = ipr_ioa_info,
4346 .queuecommand = ipr_queuecommand,
4347 .eh_abort_handler = ipr_eh_abort,
4348 .eh_device_reset_handler = ipr_eh_dev_reset,
4349 .eh_host_reset_handler = ipr_eh_host_reset,
4350 .slave_alloc = ipr_slave_alloc,
4351 .slave_configure = ipr_slave_configure,
4352 .slave_destroy = ipr_slave_destroy,
4353 .change_queue_depth = ipr_change_queue_depth,
4354 .change_queue_type = ipr_change_queue_type,
4355 .bios_param = ipr_biosparam,
4356 .can_queue = IPR_MAX_COMMANDS,
4357 .this_id = -1,
4358 .sg_tablesize = IPR_MAX_SGLIST,
4359 .max_sectors = IPR_IOA_MAX_SECTORS,
4360 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4361 .use_clustering = ENABLE_CLUSTERING,
4362 .shost_attrs = ipr_ioa_attrs,
4363 .sdev_attrs = ipr_dev_attrs,
4364 .proc_name = IPR_NAME
4365 };
4366
4367 #ifdef CONFIG_PPC_PSERIES
4368 static const u16 ipr_blocked_processors[] = {
4369 PV_NORTHSTAR,
4370 PV_PULSAR,
4371 PV_POWER4,
4372 PV_ICESTAR,
4373 PV_SSTAR,
4374 PV_POWER4p,
4375 PV_630,
4376 PV_630p
4377 };
4378
4379 /**
4380 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4381 * @ioa_cfg: ioa cfg struct
4382 *
4383 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4384 * certain pSeries hardware. This function determines if the given
4385 * adapter is in one of these configurations or not.
4386 *
4387 * Return value:
4388 * 1 if adapter is not supported / 0 if adapter is supported
4389 **/
4390 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4391 {
4392 u8 rev_id;
4393 int i;
4394
4395 if (ioa_cfg->type == 0x5702) {
4396 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4397 &rev_id) == PCIBIOS_SUCCESSFUL) {
4398 if (rev_id < 4) {
4399 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4400 if (__is_processor(ipr_blocked_processors[i]))
4401 return 1;
4402 }
4403 }
4404 }
4405 }
4406 return 0;
4407 }
4408 #else
4409 #define ipr_invalid_adapter(ioa_cfg) 0
4410 #endif
4411
4412 /**
4413 * ipr_ioa_bringdown_done - IOA bring down completion.
4414 * @ipr_cmd: ipr command struct
4415 *
4416 * This function processes the completion of an adapter bring down.
4417 * It wakes any reset sleepers.
4418 *
4419 * Return value:
4420 * IPR_RC_JOB_RETURN
4421 **/
4422 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4423 {
4424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4425
4426 ENTER;
4427 ioa_cfg->in_reset_reload = 0;
4428 ioa_cfg->reset_retries = 0;
4429 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4430 wake_up_all(&ioa_cfg->reset_wait_q);
4431
4432 spin_unlock_irq(ioa_cfg->host->host_lock);
4433 scsi_unblock_requests(ioa_cfg->host);
4434 spin_lock_irq(ioa_cfg->host->host_lock);
4435 LEAVE;
4436
4437 return IPR_RC_JOB_RETURN;
4438 }
4439
4440 /**
4441 * ipr_ioa_reset_done - IOA reset completion.
4442 * @ipr_cmd: ipr command struct
4443 *
4444 * This function processes the completion of an adapter reset.
4445 * It schedules any necessary mid-layer add/removes and
4446 * wakes any reset sleepers.
4447 *
4448 * Return value:
4449 * IPR_RC_JOB_RETURN
4450 **/
4451 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4452 {
4453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4454 struct ipr_resource_entry *res;
4455 struct ipr_hostrcb *hostrcb, *temp;
4456 int i = 0;
4457
4458 ENTER;
4459 ioa_cfg->in_reset_reload = 0;
4460 ioa_cfg->allow_cmds = 1;
4461 ioa_cfg->reset_cmd = NULL;
4462 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4463
4464 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4465 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4466 ipr_trace;
4467 break;
4468 }
4469 }
4470 schedule_work(&ioa_cfg->work_q);
4471
4472 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4473 list_del(&hostrcb->queue);
4474 if (i++ < IPR_NUM_LOG_HCAMS)
4475 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4476 else
4477 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4478 }
4479
4480 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4481
4482 ioa_cfg->reset_retries = 0;
4483 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4484 wake_up_all(&ioa_cfg->reset_wait_q);
4485
4486 spin_unlock_irq(ioa_cfg->host->host_lock);
4487 scsi_unblock_requests(ioa_cfg->host);
4488 spin_lock_irq(ioa_cfg->host->host_lock);
4489
4490 if (!ioa_cfg->allow_cmds)
4491 scsi_block_requests(ioa_cfg->host);
4492
4493 LEAVE;
4494 return IPR_RC_JOB_RETURN;
4495 }
4496
4497 /**
4498 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4499 * @supported_dev: supported device struct
4500 * @vpids: vendor product id struct
4501 *
4502 * Return value:
4503 * none
4504 **/
4505 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4506 struct ipr_std_inq_vpids *vpids)
4507 {
4508 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4509 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4510 supported_dev->num_records = 1;
4511 supported_dev->data_length =
4512 cpu_to_be16(sizeof(struct ipr_supported_device));
4513 supported_dev->reserved = 0;
4514 }
4515
4516 /**
4517 * ipr_set_supported_devs - Send Set Supported Devices for a device
4518 * @ipr_cmd: ipr command struct
4519 *
4520 * This function sends a Set Supported Devices command to the adapter
4521 *
4522 * Return value:
4523 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4524 **/
4525 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4526 {
4527 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4528 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4529 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4530 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4531 struct ipr_resource_entry *res = ipr_cmd->u.res;
4532
4533 ipr_cmd->job_step = ipr_ioa_reset_done;
4534
4535 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4536 if (!ipr_is_scsi_disk(res))
4537 continue;
4538
4539 ipr_cmd->u.res = res;
4540 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4541
4542 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4543 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4544 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4545
4546 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4547 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4548 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4549
4550 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4551 sizeof(struct ipr_supported_device));
4552 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4553 offsetof(struct ipr_misc_cbs, supp_dev));
4554 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4555 ioarcb->write_data_transfer_length =
4556 cpu_to_be32(sizeof(struct ipr_supported_device));
4557
4558 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4559 IPR_SET_SUP_DEVICE_TIMEOUT);
4560
4561 ipr_cmd->job_step = ipr_set_supported_devs;
4562 return IPR_RC_JOB_RETURN;
4563 }
4564
4565 return IPR_RC_JOB_CONTINUE;
4566 }
4567
4568 /**
4569 * ipr_setup_write_cache - Disable write cache if needed
4570 * @ipr_cmd: ipr command struct
4571 *
4572 * This function sets up the adapter's write cache to the desired setting
4573 *
4574 * Return value:
4575 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4576 **/
4577 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4578 {
4579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4580
4581 ipr_cmd->job_step = ipr_set_supported_devs;
4582 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4583 struct ipr_resource_entry, queue);
4584
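/*
 * If the cache is not being disabled there is nothing to do here;
 * otherwise a prepare-for-normal IOA shutdown is issued to disable it.
 */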
4585 if (ioa_cfg->cache_state != CACHE_DISABLED)
4586 return IPR_RC_JOB_CONTINUE;
4587
4588 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4589 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4590 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4591 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4592
4593 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4594
4595 return IPR_RC_JOB_RETURN;
4596 }
4597
4598 /**
4599 * ipr_get_mode_page - Locate specified mode page
4600 * @mode_pages: mode page buffer
4601 * @page_code: page code to find
4602 * @len: minimum required length for mode page
4603 *
4604 * Return value:
4605 * pointer to mode page / NULL on failure
4606 **/
4607 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4608 u32 page_code, u32 len)
4609 {
4610 struct ipr_mode_page_hdr *mode_hdr;
4611 u32 page_length;
4612 u32 length;
4613
4614 if (!mode_pages || (mode_pages->hdr.length == 0))
4615 return NULL;
4616
4617 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4618 mode_hdr = (struct ipr_mode_page_hdr *)
4619 (mode_pages->data + mode_pages->hdr.block_desc_len);
4620
4621 while (length) {
4622 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4623 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4624 return mode_hdr;
4625 break;
4626 } else {
4627 page_length = (sizeof(struct ipr_mode_page_hdr) +
4628 mode_hdr->page_length);
4629 length -= page_length;
4630 mode_hdr = (struct ipr_mode_page_hdr *)
4631 ((unsigned long)mode_hdr + page_length);
4632 }
4633 }
4634 return NULL;
4635 }
4636
4637 /**
4638 * ipr_check_term_power - Check for term power errors
4639 * @ioa_cfg: ioa config struct
4640 * @mode_pages: IOAFP mode pages buffer
4641 *
4642 * Check the IOAFP's mode page 28 for term power errors
4643 *
4644 * Return value:
4645 * nothing
4646 **/
4647 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4648 struct ipr_mode_pages *mode_pages)
4649 {
4650 int i;
4651 int entry_length;
4652 struct ipr_dev_bus_entry *bus;
4653 struct ipr_mode_page28 *mode_page;
4654
4655 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4656 sizeof(struct ipr_mode_page28));
4657
4658 entry_length = mode_page->entry_length;
4659
4660 bus = mode_page->bus;
4661
4662 for (i = 0; i < mode_page->num_entries; i++) {
4663 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4664 dev_err(&ioa_cfg->pdev->dev,
4665 "Term power is absent on scsi bus %d\n",
4666 bus->res_addr.bus);
4667 }
4668
4669 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4670 }
4671 }
4672
4673 /**
4674 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4675 * @ioa_cfg: ioa config struct
4676 *
4677 * Looks through the config table checking for SES devices. If
4678 * the SES device is in the SES table indicating a maximum SCSI
4679 * bus speed, the speed is limited for the bus.
4680 *
4681 * Return value:
4682 * none
4683 **/
4684 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4685 {
4686 u32 max_xfer_rate;
4687 int i;
4688
4689 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4690 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4691 ioa_cfg->bus_attr[i].bus_width);
4692
4693 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4694 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4695 }
4696 }
4697
4698 /**
4699 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4700 * @ioa_cfg: ioa config struct
4701 * @mode_pages: mode page 28 buffer
4702 *
4703 * Updates mode page 28 based on driver configuration
4704 *
4705 * Return value:
4706 * none
4707 **/
4708 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4709 struct ipr_mode_pages *mode_pages)
4710 {
4711 int i, entry_length;
4712 struct ipr_dev_bus_entry *bus;
4713 struct ipr_bus_attributes *bus_attr;
4714 struct ipr_mode_page28 *mode_page;
4715
4716 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4717 sizeof(struct ipr_mode_page28));
4718
4719 entry_length = mode_page->entry_length;
4720
4721 /* Loop for each device bus entry */
4722 for (i = 0, bus = mode_page->bus;
4723 i < mode_page->num_entries;
4724 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4725 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4726 dev_err(&ioa_cfg->pdev->dev,
4727 "Invalid resource address reported: 0x%08X\n",
4728 IPR_GET_PHYS_LOC(bus->res_addr));
4729 continue;
4730 }
4731
4732 bus_attr = &ioa_cfg->bus_attr[i];
4733 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4734 bus->bus_width = bus_attr->bus_width;
4735 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4736 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4737 if (bus_attr->qas_enabled)
4738 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4739 else
4740 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4741 }
4742 }
4743
4744 /**
4745 * ipr_build_mode_select - Build a mode select command
4746 * @ipr_cmd: ipr command struct
4747 * @res_handle: resource handle to send command to
4748 * @parm: Byte 2 of the Mode Select command
4749 * @dma_addr: DMA buffer address
4750 * @xfer_len: data transfer length
4751 *
4752 * Return value:
4753 * none
4754 **/
4755 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4756 __be32 res_handle, u8 parm, u32 dma_addr,
4757 u8 xfer_len)
4758 {
4759 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4760 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4761
4762 ioarcb->res_handle = res_handle;
4763 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4764 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4765 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4766 ioarcb->cmd_pkt.cdb[1] = parm;
4767 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4768
4769 ioadl->flags_and_data_len =
4770 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4771 ioadl->address = cpu_to_be32(dma_addr);
4772 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4773 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4774 }
4775
4776 /**
4777 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4778 * @ipr_cmd: ipr command struct
4779 *
4780 * This function sets up the SCSI bus attributes and sends
4781 * a Mode Select for Page 28 to activate them.
4782 *
4783 * Return value:
4784 * IPR_RC_JOB_RETURN
4785 **/
4786 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4787 {
4788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4789 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4790 int length;
4791
4792 ENTER;
4793 ipr_scsi_bus_speed_limit(ioa_cfg);
4794 ipr_check_term_power(ioa_cfg, mode_pages);
4795 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4796 length = mode_pages->hdr.length + 1;
4797 mode_pages->hdr.length = 0;
4798
4799 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4800 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4801 length);
4802
4803 ipr_cmd->job_step = ipr_setup_write_cache;
4804 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4805
4806 LEAVE;
4807 return IPR_RC_JOB_RETURN;
4808 }
4809
4810 /**
4811 * ipr_build_mode_sense - Builds a mode sense command
4812 * @ipr_cmd: ipr command struct
4813 * @res_handle: resource handle to send command to
4814 * @parm: Byte 2 of mode sense command
4815 * @dma_addr: DMA address of mode sense buffer
4816 * @xfer_len: Size of DMA buffer
4817 *
4818 * Return value:
4819 * none
4820 **/
4821 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4822 __be32 res_handle,
4823 u8 parm, u32 dma_addr, u8 xfer_len)
4824 {
4825 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4826 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4827
4828 ioarcb->res_handle = res_handle;
4829 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4830 ioarcb->cmd_pkt.cdb[2] = parm;
4831 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4832 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4833
4834 ioadl->flags_and_data_len =
4835 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4836 ioadl->address = cpu_to_be32(dma_addr);
4837 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4838 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4839 }
4840
4841 /**
4842 * ipr_reset_cmd_failed - Handle failure of IOA reset command
4843 * @ipr_cmd: ipr command struct
4844 *
4845 * This function handles the failure of an IOA bringup command.
4846 *
4847 * Return value:
4848 * IPR_RC_JOB_RETURN
4849 **/
4850 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4851 {
4852 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4853 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4854
4855 dev_err(&ioa_cfg->pdev->dev,
4856 "0x%02X failed with IOASC: 0x%08X\n",
4857 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4858
4859 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4860 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4861 return IPR_RC_JOB_RETURN;
4862 }
4863
4864 /**
4865 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4866 * @ipr_cmd: ipr command struct
4867 *
4868 * This function handles the failure of a Mode Sense to the IOAFP.
4869 * Some adapters do not handle all mode pages.
4870 *
4871 * Return value:
4872 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4873 **/
4874 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4875 {
4876 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4877
4878 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4879 ipr_cmd->job_step = ipr_setup_write_cache;
4880 return IPR_RC_JOB_CONTINUE;
4881 }
4882
4883 return ipr_reset_cmd_failed(ipr_cmd);
4884 }
4885
4886 /**
4887 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4888 * @ipr_cmd: ipr command struct
4889 *
4890 * This function sends a Page 28 mode sense to the IOA to
4891 * retrieve SCSI bus attributes.
4892 *
4893 * Return value:
4894 * IPR_RC_JOB_RETURN
4895 **/
4896 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4897 {
4898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4899
4900 ENTER;
4901 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4902 0x28, ioa_cfg->vpd_cbs_dma +
4903 offsetof(struct ipr_misc_cbs, mode_pages),
4904 sizeof(struct ipr_mode_pages));
4905
4906 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4907 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4908
4909 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4910
4911 LEAVE;
4912 return IPR_RC_JOB_RETURN;
4913 }
4914
4915 /**
4916 * ipr_init_res_table - Initialize the resource table
4917 * @ipr_cmd: ipr command struct
4918 *
4919 * This function looks through the existing resource table, comparing
4920 * it with the config table. This function will take care of old/new
4921 * devices and schedule adding/removing them from the mid-layer
4922 * as appropriate.
4923 *
4924 * Return value:
4925 * IPR_RC_JOB_CONTINUE
4926 **/
4927 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4928 {
4929 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4930 struct ipr_resource_entry *res, *temp;
4931 struct ipr_config_table_entry *cfgte;
4932 int found, i;
4933 LIST_HEAD(old_res);
4934
4935 ENTER;
4936 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4937 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4938
4939 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4940 list_move_tail(&res->queue, &old_res);
4941
4942 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4943 cfgte = &ioa_cfg->cfg_table->dev[i];
4944 found = 0;
4945
4946 list_for_each_entry_safe(res, temp, &old_res, queue) {
4947 if (!memcmp(&res->cfgte.res_addr,
4948 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4949 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4950 found = 1;
4951 break;
4952 }
4953 }
4954
4955 if (!found) {
4956 if (list_empty(&ioa_cfg->free_res_q)) {
4957 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4958 break;
4959 }
4960
4961 found = 1;
4962 res = list_entry(ioa_cfg->free_res_q.next,
4963 struct ipr_resource_entry, queue);
4964 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4965 ipr_init_res_entry(res);
4966 res->add_to_ml = 1;
4967 }
4968
4969 if (found)
4970 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4971 }
4972
4973 list_for_each_entry_safe(res, temp, &old_res, queue) {
4974 if (res->sdev) {
4975 res->del_from_ml = 1;
4976 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
4977 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4978 } else {
4979 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4980 }
4981 }
4982
4983 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4984
4985 LEAVE;
4986 return IPR_RC_JOB_CONTINUE;
4987 }
4988
4989 /**
4990 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4991 * @ipr_cmd: ipr command struct
4992 *
4993 * This function sends a Query IOA Configuration command
4994 * to the adapter to retrieve the IOA configuration table.
4995 *
4996 * Return value:
4997 * IPR_RC_JOB_RETURN
4998 **/
4999 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5000 {
5001 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5002 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5003 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5004 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5005
5006 ENTER;
5007 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5008 ucode_vpd->major_release, ucode_vpd->card_type,
5009 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5010 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5011 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5012
5013 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5014 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5015 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5016
5017 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5018 ioarcb->read_data_transfer_length =
5019 cpu_to_be32(sizeof(struct ipr_config_table));
5020
5021 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5022 ioadl->flags_and_data_len =
5023 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5024
5025 ipr_cmd->job_step = ipr_init_res_table;
5026
5027 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5028
5029 LEAVE;
5030 return IPR_RC_JOB_RETURN;
5031 }
5032
5033 /**
5034 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5035 * @ipr_cmd: ipr command struct
 * @flags: inquiry CDB byte 1 (EVPD flag)
 * @page: VPD page code to request
 * @dma_addr: DMA address of the response buffer
 * @xfer_len: size of the response buffer
5036 *
5037 * This utility function sends an inquiry to the adapter.
5038 *
5039 * Return value:
5040 * none
5041 **/
5042 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5043 u32 dma_addr, u8 xfer_len)
5044 {
5045 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5046 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5047
5048 ENTER;
5049 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5050 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5051
5052 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5053 ioarcb->cmd_pkt.cdb[1] = flags;
5054 ioarcb->cmd_pkt.cdb[2] = page;
5055 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5056
5057 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5058 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5059
5060 ioadl->address = cpu_to_be32(dma_addr);
5061 ioadl->flags_and_data_len =
5062 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5063
5064 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5065 LEAVE;
5066 }
5067
5068 /**
5069 * ipr_inquiry_page_supported - Is the given inquiry page supported
5070 * @page0: inquiry page 0 buffer
5071 * @page: page code.
5072 *
5073 * This function determines if the specified inquiry page is supported.
5074 *
5075 * Return value:
5076 * 1 if page is supported / 0 if not
5077 **/
5078 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5079 {
5080 int i;
5081
5082 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5083 if (page0->page[i] == page)
5084 return 1;
5085
5086 return 0;
5087 }
5088
5089 /**
5090 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5091 * @ipr_cmd: ipr command struct
5092 *
5093 * This function sends a Page 3 inquiry to the adapter
5094 * to retrieve software VPD information.
5095 *
5096 * Return value:
5097 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5098 **/
5099 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5100 {
5101 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5102 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5103
5104 ENTER;
5105
5106 if (!ipr_inquiry_page_supported(page0, 1))
5107 ioa_cfg->cache_state = CACHE_NONE;
5108
5109 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5110
5111 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5112 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5113 sizeof(struct ipr_inquiry_page3));
5114
5115 LEAVE;
5116 return IPR_RC_JOB_RETURN;
5117 }
5118
5119 /**
5120 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5121 * @ipr_cmd: ipr command struct
5122 *
5123 * This function sends a Page 0 inquiry to the adapter
5124 * to retrieve supported inquiry pages.
5125 *
5126 * Return value:
5127 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5128 **/
5129 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5130 {
5131 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5132 char type[5];
5133
5134 ENTER;
5135
5136 /* Grab the type out of the VPD and store it away */
5137 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5138 type[4] = '\0';
5139 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5140
5141 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5142
5143 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5144 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5145 sizeof(struct ipr_inquiry_page0));
5146
5147 LEAVE;
5148 return IPR_RC_JOB_RETURN;
5149 }
5150
5151 /**
5152 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5153 * @ipr_cmd: ipr command struct
5154 *
5155 * This function sends a standard inquiry to the adapter.
5156 *
5157 * Return value:
5158 * IPR_RC_JOB_RETURN
5159 **/
5160 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5161 {
5162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5163
5164 ENTER;
5165 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5166
5167 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5168 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5169 sizeof(struct ipr_ioa_vpd));
5170
5171 LEAVE;
5172 return IPR_RC_JOB_RETURN;
5173 }
5174
5175 /**
5176 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5177 * @ipr_cmd: ipr command struct
5178 *
5179 * This function sends an Identify Host Request Response Queue
5180 * command to establish the HRRQ with the adapter.
5181 *
5182 * Return value:
5183 * IPR_RC_JOB_RETURN
5184 **/
5185 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5186 {
5187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5188 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5189
5190 ENTER;
5191 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5192
5193 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5194 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5195
5196 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
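/*
 * cdb[2..5] carry the host RRQ DMA address, most significant byte first;
 * cdb[7..8] carry its length in bytes.
 */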
5197 ioarcb->cmd_pkt.cdb[2] =
5198 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5199 ioarcb->cmd_pkt.cdb[3] =
5200 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5201 ioarcb->cmd_pkt.cdb[4] =
5202 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5203 ioarcb->cmd_pkt.cdb[5] =
5204 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5205 ioarcb->cmd_pkt.cdb[7] =
5206 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5207 ioarcb->cmd_pkt.cdb[8] =
5208 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5209
5210 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5211
5212 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5213
5214 LEAVE;
5215 return IPR_RC_JOB_RETURN;
5216 }
5217
5218 /**
5219 * ipr_reset_timer_done - Adapter reset timer function
5220 * @ipr_cmd: ipr command struct
5221 *
5222 * Description: This function is used in adapter reset processing
5223 * for timing events. If the reset_cmd pointer in the IOA
5224 * config struct is not this adapter's we are doing nested
5225 * resets and fail_all_ops will take care of freeing the
5226 * command block.
5227 *
5228 * Return value:
5229 * none
5230 **/
5231 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5232 {
5233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5234 unsigned long lock_flags = 0;
5235
5236 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5237
5238 if (ioa_cfg->reset_cmd == ipr_cmd) {
5239 list_del(&ipr_cmd->queue);
5240 ipr_cmd->done(ipr_cmd);
5241 }
5242
5243 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5244 }
5245
5246 /**
5247 * ipr_reset_start_timer - Start a timer for adapter reset job
5248 * @ipr_cmd: ipr command struct
5249 * @timeout: timeout value
5250 *
5251 * Description: This function is used in adapter reset processing
5252 * for timing events. If the reset_cmd pointer in the IOA
5253 * config struct is not this adapter's, we are doing nested
5254 * resets and fail_all_ops will take care of freeing the
5255 * command block.
5256 *
5257 * Return value:
5258 * none
5259 **/
5260 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5261 unsigned long timeout)
5262 {
5263 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5264 ipr_cmd->done = ipr_reset_ioa_job;
5265
5266 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5267 ipr_cmd->timer.expires = jiffies + timeout;
5268 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5269 add_timer(&ipr_cmd->timer);
5270 }
5271
5272 /**
5273 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5274 * @ioa_cfg: ioa cfg struct
5275 *
5276 * Return value:
5277 * nothing
5278 **/
5279 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5280 {
5281 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5282
5283 /* Initialize Host RRQ pointers */
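/* The expected toggle bit starts at 1; ipr_isr flips it each time the queue wraps */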
5284 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5285 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5286 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5287 ioa_cfg->toggle_bit = 1;
5288
5289 /* Zero out config table */
5290 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5291 }
5292
5293 /**
5294 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5295 * @ipr_cmd: ipr command struct
5296 *
5297 * This function reinitializes some control blocks and
5298 * enables destructive diagnostics on the adapter.
5299 *
5300 * Return value:
5301 * IPR_RC_JOB_RETURN
5302 **/
5303 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5304 {
5305 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5306 volatile u32 int_reg;
5307
5308 ENTER;
5309 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5310 ipr_init_ioa_mem(ioa_cfg);
5311
5312 ioa_cfg->allow_interrupts = 1;
5313 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5314
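/*
 * If the IOA is already transitioning to operational, just unmask the
 * error and HRRQ interrupts and let the reset job continue; there is no
 * need to ring the doorbell or arm the operational timeout below.
 */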
5315 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5316 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5317 ioa_cfg->regs.clr_interrupt_mask_reg);
5318 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5319 return IPR_RC_JOB_CONTINUE;
5320 }
5321
5322 /* Enable destructive diagnostics on IOA */
5323 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5324
5325 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5326 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5327
5328 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5329
5330 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5331 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5332 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5333 ipr_cmd->done = ipr_reset_ioa_job;
5334 add_timer(&ipr_cmd->timer);
5335 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5336
5337 LEAVE;
5338 return IPR_RC_JOB_RETURN;
5339 }
5340
5341 /**
5342 * ipr_reset_wait_for_dump - Wait for a dump to time out.
5343 * @ipr_cmd: ipr command struct
5344 *
5345 * This function is invoked when an adapter dump has run out
5346 * of processing time.
5347 *
5348 * Return value:
5349 * IPR_RC_JOB_CONTINUE
5350 **/
5351 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5352 {
5353 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5354
5355 if (ioa_cfg->sdt_state == GET_DUMP)
5356 ioa_cfg->sdt_state = ABORT_DUMP;
5357
5358 ipr_cmd->job_step = ipr_reset_alert;
5359
5360 return IPR_RC_JOB_CONTINUE;
5361 }
5362
5363 /**
5364 * ipr_unit_check_no_data - Log a unit check/no data error log
5365 * @ioa_cfg: ioa config struct
5366 *
5367 * Logs an error indicating the adapter unit checked, but for some
5368 * reason, we were unable to fetch the unit check buffer.
5369 *
5370 * Return value:
5371 * nothing
5372 **/
5373 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5374 {
5375 ioa_cfg->errors_logged++;
5376 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5377 }
5378
5379 /**
5380 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5381 * @ioa_cfg: ioa config struct
5382 *
5383 * Fetches the unit check buffer from the adapter by clocking the data
5384 * through the mailbox register.
5385 *
5386 * Return value:
5387 * nothing
5388 **/
5389 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5390 {
5391 unsigned long mailbox;
5392 struct ipr_hostrcb *hostrcb;
5393 struct ipr_uc_sdt sdt;
5394 int rc, length;
5395
5396 mailbox = readl(ioa_cfg->ioa_mailbox);
5397
5398 if (!ipr_sdt_is_fmt2(mailbox)) {
5399 ipr_unit_check_no_data(ioa_cfg);
5400 return;
5401 }
5402
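/*
 * The mailbox points at a format 2 SDT; its first entry gives the
 * location and length of the unit check buffer, which is then fetched
 * with ipr_get_ldump_data_section().
 */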
5403 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5404 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5405 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5406
5407 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5408 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5409 ipr_unit_check_no_data(ioa_cfg);
5410 return;
5411 }
5412
5413 /* Find length of the first sdt entry (UC buffer) */
5414 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5415 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5416
5417 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5418 struct ipr_hostrcb, queue);
5419 list_del(&hostrcb->queue);
5420 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5421
5422 rc = ipr_get_ldump_data_section(ioa_cfg,
5423 be32_to_cpu(sdt.entry[0].bar_str_offset),
5424 (__be32 *)&hostrcb->hcam,
5425 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5426
5427 if (!rc)
5428 ipr_handle_log_data(ioa_cfg, hostrcb);
5429 else
5430 ipr_unit_check_no_data(ioa_cfg);
5431
5432 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5433 }
5434
5435 /**
5436 * ipr_reset_restore_cfg_space - Restore PCI config space.
5437 * @ipr_cmd: ipr command struct
5438 *
5439 * Description: This function restores the saved PCI config space of
5440 * the adapter, fails all outstanding ops back to the callers, and
5441 * fetches the dump/unit check if applicable to this reset.
5442 *
5443 * Return value:
5444 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5445 **/
5446 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5447 {
5448 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5449 int rc;
5450
5451 ENTER;
5452 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5453 rc = pci_restore_state(ioa_cfg->pdev);
5454
5455 if (rc != PCIBIOS_SUCCESSFUL) {
5456 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5457 return IPR_RC_JOB_CONTINUE;
5458 }
5459
5460 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5461 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5462 return IPR_RC_JOB_CONTINUE;
5463 }
5464
5465 ipr_fail_all_ops(ioa_cfg);
5466
5467 if (ioa_cfg->ioa_unit_checked) {
5468 ioa_cfg->ioa_unit_checked = 0;
5469 ipr_get_unit_check_buffer(ioa_cfg);
5470 ipr_cmd->job_step = ipr_reset_alert;
5471 ipr_reset_start_timer(ipr_cmd, 0);
5472 return IPR_RC_JOB_RETURN;
5473 }
5474
5475 if (ioa_cfg->in_ioa_bringdown) {
5476 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5477 } else {
5478 ipr_cmd->job_step = ipr_reset_enable_ioa;
5479
5480 if (GET_DUMP == ioa_cfg->sdt_state) {
5481 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5482 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5483 schedule_work(&ioa_cfg->work_q);
5484 return IPR_RC_JOB_RETURN;
5485 }
5486 }
5487
5488 LEAVE;
5489 return IPR_RC_JOB_CONTINUE;
5490 }
5491
5492 /**
5493 * ipr_reset_start_bist - Run BIST on the adapter.
5494 * @ipr_cmd: ipr command struct
5495 *
5496 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5497 *
5498 * Return value:
5499 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5500 **/
5501 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5502 {
5503 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5504 int rc;
5505
5506 ENTER;
5507 pci_block_user_cfg_access(ioa_cfg->pdev);
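/* Writing PCI_BIST_START kicks off the adapter's built-in self test;
 * the timer armed below (IPR_WAIT_FOR_BIST_TIMEOUT, roughly the two
 * seconds noted above) lets it finish before config space is restored. */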
5508 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5509
5510 if (rc != PCIBIOS_SUCCESSFUL) {
5511 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5512 rc = IPR_RC_JOB_CONTINUE;
5513 } else {
5514 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5515 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5516 rc = IPR_RC_JOB_RETURN;
5517 }
5518
5519 LEAVE;
5520 return rc;
5521 }
5522
5523 /**
5524 * ipr_reset_allowed - Query whether or not IOA can be reset
5525 * @ioa_cfg: ioa config struct
5526 *
5527 * Return value:
5528 * 0 if reset not allowed / non-zero if reset is allowed
5529 **/
5530 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5531 {
5532 volatile u32 temp_reg;
5533
5534 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5535 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5536 }
5537
5538 /**
5539 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5540 * @ipr_cmd: ipr command struct
5541 *
5542 * Description: This function waits for adapter permission to run BIST,
5543 * then runs BIST. If the adapter does not give permission after a
5544 * reasonable time, we reset the adapter anyway. The impact of
5545 * resetting the adapter without first warning it is the risk of
5546 * losing the adapter's persistent error log. If the adapter is
5547 * reset while it is writing to its flash, that flash segment
5548 * will have bad ECC and be zeroed.
5549 *
5550 * Return value:
5551 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5552 **/
5553 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5554 {
5555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5556 int rc = IPR_RC_JOB_RETURN;
5557
5558 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5559 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5560 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5561 } else {
5562 ipr_cmd->job_step = ipr_reset_start_bist;
5563 rc = IPR_RC_JOB_CONTINUE;
5564 }
5565
5566 return rc;
5567 }
5568
5569 /**
5570 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5571 * @ipr_cmd: ipr command struct
5572 *
5573 * Description: This function alerts the adapter that it will be reset.
5574 * If memory space is not currently enabled, proceed directly
5575 * to running BIST on the adapter. The timer must always be started
5576 * so we guarantee we do not run BIST from ipr_isr.
5577 *
5578 * Return value:
5579 * IPR_RC_JOB_RETURN
5580 **/
5581 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5582 {
5583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5584 u16 cmd_reg;
5585 int rc;
5586
5587 ENTER;
5588 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5589
5590 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5591 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5592 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5593 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5594 } else {
5595 ipr_cmd->job_step = ipr_reset_start_bist;
5596 }
5597
5598 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5599 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5600
5601 LEAVE;
5602 return IPR_RC_JOB_RETURN;
5603 }
5604
5605 /**
5606 * ipr_reset_ucode_download_done - Microcode download completion
5607 * @ipr_cmd: ipr command struct
5608 *
5609 * Description: This function unmaps the microcode download buffer.
5610 *
5611 * Return value:
5612 * IPR_RC_JOB_CONTINUE
5613 **/
5614 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5615 {
5616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5617 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5618
5619 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5620 sglist->num_sg, DMA_TO_DEVICE);
5621
5622 ipr_cmd->job_step = ipr_reset_alert;
5623 return IPR_RC_JOB_CONTINUE;
5624 }
5625
5626 /**
5627 * ipr_reset_ucode_download - Download microcode to the adapter
5628 * @ipr_cmd: ipr command struct
5629 *
5630 * Description: This function checks to see if there is microcode
5631 * to download to the adapter. If there is, a download is performed.
5632 *
5633 * Return value:
5634 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5635 **/
5636 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5637 {
5638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5639 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5640
5641 ENTER;
5642 ipr_cmd->job_step = ipr_reset_alert;
5643
5644 if (!sglist)
5645 return IPR_RC_JOB_CONTINUE;
5646
5647 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5648 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5649 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5650 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
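/* WRITE BUFFER (download and save) carries the 24-bit transfer length
 * in CDB bytes 6-8, most significant byte first. */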
5651 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5652 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5653 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5654
5655 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5656 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5657
5658 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5659 IPR_WRITE_BUFFER_TIMEOUT);
5660
5661 LEAVE;
5662 return IPR_RC_JOB_RETURN;
5663 }
5664
5665 /**
5666 * ipr_reset_shutdown_ioa - Shutdown the adapter
5667 * @ipr_cmd: ipr command struct
5668 *
5669 * Description: This function issues an adapter shutdown of the
5670 * specified type to the specified adapter as part of the
5671 * adapter reset job.
5672 *
5673 * Return value:
5674 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5675 **/
5676 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5677 {
5678 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5679 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5680 unsigned long timeout;
5681 int rc = IPR_RC_JOB_CONTINUE;
5682
5683 ENTER;
5684 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5685 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5686 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5687 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5688 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5689
5690 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5691 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5692 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5693 timeout = IPR_INTERNAL_TIMEOUT;
5694 else
5695 timeout = IPR_SHUTDOWN_TIMEOUT;
5696
5697 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5698
5699 rc = IPR_RC_JOB_RETURN;
5700 ipr_cmd->job_step = ipr_reset_ucode_download;
5701 } else
5702 ipr_cmd->job_step = ipr_reset_alert;
5703
5704 LEAVE;
5705 return rc;
5706 }
5707
5708 /**
5709 * ipr_reset_ioa_job - Adapter reset job
5710 * @ipr_cmd: ipr command struct
5711 *
5712 * Description: This function is the job router for the adapter reset job.
5713 *
5714 * Return value:
5715 * none
5716 **/
5717 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5718 {
5719 u32 rc, ioasc;
5720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5721
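/*
 * Job-step state machine: each step returns IPR_RC_JOB_CONTINUE to run
 * the next step immediately, or IPR_RC_JOB_RETURN after arming a timer
 * or issuing a command whose completion re-enters this routine.
 */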
5722 do {
5723 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5724
5725 if (ioa_cfg->reset_cmd != ipr_cmd) {
5726 /*
5727 * We are doing nested adapter resets and this is
5728 * not the current reset job.
5729 */
5730 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5731 return;
5732 }
5733
5734 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5735 rc = ipr_cmd->job_step_failed(ipr_cmd);
5736 if (rc == IPR_RC_JOB_RETURN)
5737 return;
5738 }
5739
5740 ipr_reinit_ipr_cmnd(ipr_cmd);
5741 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5742 rc = ipr_cmd->job_step(ipr_cmd);
5743 	} while (rc == IPR_RC_JOB_CONTINUE);
5744 }
5745
5746 /**
5747 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5748 * @ioa_cfg: ioa config struct
5749 * @job_step: first job step of reset job
5750 * @shutdown_type: shutdown type
5751 *
5752 * Description: This function will initiate the reset of the given adapter
5753 * starting at the selected job step.
5754 * If the caller needs to wait on the completion of the reset,
5755 * the caller must sleep on the reset_wait_q.
5756 *
5757 * Return value:
5758 * none
5759 **/
5760 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5761 int (*job_step) (struct ipr_cmnd *),
5762 enum ipr_shutdown_type shutdown_type)
5763 {
5764 struct ipr_cmnd *ipr_cmd;
5765
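/* Callers hold the host lock. Flag the reset, stop queueing new
 * commands, and block the SCSI midlayer before starting the job. */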
5766 ioa_cfg->in_reset_reload = 1;
5767 ioa_cfg->allow_cmds = 0;
5768 scsi_block_requests(ioa_cfg->host);
5769
5770 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5771 ioa_cfg->reset_cmd = ipr_cmd;
5772 ipr_cmd->job_step = job_step;
5773 ipr_cmd->u.shutdown_type = shutdown_type;
5774
5775 ipr_reset_ioa_job(ipr_cmd);
5776 }
5777
5778 /**
5779 * ipr_initiate_ioa_reset - Initiate an adapter reset
5780 * @ioa_cfg: ioa config struct
5781 * @shutdown_type: shutdown type
5782 *
5783 * Description: This function will initiate the reset of the given adapter.
5784 * If the caller needs to wait on the completion of the reset,
5785 * the caller must sleep on the reset_wait_q.
5786 *
5787 * Return value:
5788 * none
5789 **/
5790 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5791 enum ipr_shutdown_type shutdown_type)
5792 {
5793 if (ioa_cfg->ioa_is_dead)
5794 return;
5795
5796 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5797 ioa_cfg->sdt_state = ABORT_DUMP;
5798
5799 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5800 dev_err(&ioa_cfg->pdev->dev,
5801 "IOA taken offline - error recovery failed\n");
5802
5803 ioa_cfg->reset_retries = 0;
5804 ioa_cfg->ioa_is_dead = 1;
5805
5806 if (ioa_cfg->in_ioa_bringdown) {
5807 ioa_cfg->reset_cmd = NULL;
5808 ioa_cfg->in_reset_reload = 0;
5809 ipr_fail_all_ops(ioa_cfg);
5810 wake_up_all(&ioa_cfg->reset_wait_q);
5811
5812 spin_unlock_irq(ioa_cfg->host->host_lock);
5813 scsi_unblock_requests(ioa_cfg->host);
5814 spin_lock_irq(ioa_cfg->host->host_lock);
5815 return;
5816 } else {
5817 ioa_cfg->in_ioa_bringdown = 1;
5818 shutdown_type = IPR_SHUTDOWN_NONE;
5819 }
5820 }
5821
5822 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5823 shutdown_type);
5824 }
5825
5826 /**
5827 * ipr_reset_freeze - Hold off all I/O activity
5828 * @ipr_cmd: ipr command struct
5829 *
5830 * Description: If the PCI slot is frozen, hold off all I/O
5831 * activity; then, as soon as the slot is available again,
5832 * initiate an adapter reset.
5833 */
5834 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5835 {
5836 	/* Disallow new interrupts to avoid an interrupt loop while the slot is frozen */
5837 ipr_cmd->ioa_cfg->allow_interrupts = 0;
5838 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5839 ipr_cmd->done = ipr_reset_ioa_job;
5840 return IPR_RC_JOB_RETURN;
5841 }
5842
5843 /**
5844 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5845 * @pdev: PCI device struct
5846 *
5847 * Description: This routine is called to tell us that the PCI bus
5848 * is down. Can't do anything here, except put the device driver
5849 * into a holding pattern, waiting for the PCI bus to come back.
5850 */
5851 static void ipr_pci_frozen(struct pci_dev *pdev)
5852 {
5853 unsigned long flags = 0;
5854 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5855
5856 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5857 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5859 }
5860
5861 /**
5862 * ipr_pci_slot_reset - Called when PCI slot has been reset.
5863 * @pdev: PCI device struct
5864 *
5865 * Description: This routine is called by the pci error recovery
5866 * code after the PCI slot has been reset, just before we
5867 * should resume normal operations.
5868 */
5869 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
5870 {
5871 unsigned long flags = 0;
5872 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5873
5874 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5875 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
5876 IPR_SHUTDOWN_NONE);
5877 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5878 return PCI_ERS_RESULT_RECOVERED;
5879 }
5880
5881 /**
5882 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
5883 * @pdev: PCI device struct
5884 *
5885 * Description: This routine is called when the PCI bus has
5886 * permanently failed.
5887 */
5888 static void ipr_pci_perm_failure(struct pci_dev *pdev)
5889 {
5890 unsigned long flags = 0;
5891 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5892
5893 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5894 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5895 ioa_cfg->sdt_state = ABORT_DUMP;
5896 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
5897 ioa_cfg->in_ioa_bringdown = 1;
5898 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5900 }
5901
5902 /**
5903 * ipr_pci_error_detected - Called when a PCI error is detected.
5904 * @pdev: PCI device struct
5905 * @state: PCI channel state
5906 *
5907 * Description: Called when a PCI error is detected.
5908 *
5909 * Return value:
5910 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
5911 */
5912 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
5913 pci_channel_state_t state)
5914 {
5915 switch (state) {
5916 case pci_channel_io_frozen:
5917 ipr_pci_frozen(pdev);
5918 return PCI_ERS_RESULT_NEED_RESET;
5919 case pci_channel_io_perm_failure:
5920 ipr_pci_perm_failure(pdev);
5921 return PCI_ERS_RESULT_DISCONNECT;
5922
5923 default:
5924 break;
5925 }
5926 return PCI_ERS_RESULT_NEED_RESET;
5927 }
5928
5929 /**
5930 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5931 * @ioa_cfg: ioa cfg struct
5932 *
5933 * Description: This is the second phase of adapter initialization.
5934 * This function takes care of initializing the adapter to the point
5935 * where it can accept new commands.
5936 *
5937 * Return value:
5938 * 	0 on success / -EIO on failure
5939 **/
5940 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5941 {
5942 int rc = 0;
5943 unsigned long host_lock_flags = 0;
5944
5945 ENTER;
5946 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5947 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5948 if (ioa_cfg->needs_hard_reset) {
5949 ioa_cfg->needs_hard_reset = 0;
5950 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5951 } else
5952 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
5953 IPR_SHUTDOWN_NONE);
5954
5955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5956 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5957 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5958
5959 if (ioa_cfg->ioa_is_dead) {
5960 rc = -EIO;
5961 } else if (ipr_invalid_adapter(ioa_cfg)) {
5962 if (!ipr_testmode)
5963 rc = -EIO;
5964
5965 dev_err(&ioa_cfg->pdev->dev,
5966 "Adapter not supported in this hardware configuration.\n");
5967 }
5968
5969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5970
5971 LEAVE;
5972 return rc;
5973 }
5974
5975 /**
5976 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5977 * @ioa_cfg: ioa config struct
5978 *
5979 * Return value:
5980 * none
5981 **/
5982 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5983 {
5984 int i;
5985
5986 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5987 if (ioa_cfg->ipr_cmnd_list[i])
5988 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5989 ioa_cfg->ipr_cmnd_list[i],
5990 ioa_cfg->ipr_cmnd_list_dma[i]);
5991
5992 ioa_cfg->ipr_cmnd_list[i] = NULL;
5993 }
5994
5995 if (ioa_cfg->ipr_cmd_pool)
5996 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5997
5998 ioa_cfg->ipr_cmd_pool = NULL;
5999 }
6000
6001 /**
6002 * ipr_free_mem - Frees memory allocated for an adapter
6003 * @ioa_cfg: ioa cfg struct
6004 *
6005 * Return value:
6006 * nothing
6007 **/
6008 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6009 {
6010 int i;
6011
6012 kfree(ioa_cfg->res_entries);
6013 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6014 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6015 ipr_free_cmd_blks(ioa_cfg);
6016 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6017 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6018 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6019 ioa_cfg->cfg_table,
6020 ioa_cfg->cfg_table_dma);
6021
6022 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6023 pci_free_consistent(ioa_cfg->pdev,
6024 sizeof(struct ipr_hostrcb),
6025 ioa_cfg->hostrcb[i],
6026 ioa_cfg->hostrcb_dma[i]);
6027 }
6028
6029 ipr_free_dump(ioa_cfg);
6030 kfree(ioa_cfg->trace);
6031 }
6032
6033 /**
6034 * ipr_free_all_resources - Free all allocated resources for an adapter.
6035 * @ipr_cmd: ipr command struct
6036 *
6037 * This function frees all allocated resources for the
6038 * specified adapter.
6039 *
6040 * Return value:
6041 * none
6042 **/
6043 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6044 {
6045 struct pci_dev *pdev = ioa_cfg->pdev;
6046
6047 ENTER;
6048 free_irq(pdev->irq, ioa_cfg);
6049 iounmap(ioa_cfg->hdw_dma_regs);
6050 pci_release_regions(pdev);
6051 ipr_free_mem(ioa_cfg);
6052 scsi_host_put(ioa_cfg->host);
6053 pci_disable_device(pdev);
6054 LEAVE;
6055 }
6056
6057 /**
6058 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6059 * @ioa_cfg: ioa config struct
6060 *
6061 * Return value:
6062 * 0 on success / -ENOMEM on allocation failure
6063 **/
6064 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6065 {
6066 struct ipr_cmnd *ipr_cmd;
6067 struct ipr_ioarcb *ioarcb;
6068 dma_addr_t dma_addr;
6069 int i;
6070
6071 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6072 sizeof(struct ipr_cmnd), 8, 0);
6073
6074 if (!ioa_cfg->ipr_cmd_pool)
6075 return -ENOMEM;
6076
6077 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6078 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6079
6080 if (!ipr_cmd) {
6081 ipr_free_cmd_blks(ioa_cfg);
6082 return -ENOMEM;
6083 }
6084
6085 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6086 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6087 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6088
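/* Each command block embeds its own IOARCB, IOADL and IOASA; give the
 * adapter their bus addresses relative to the block's DMA handle. */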
6089 ioarcb = &ipr_cmd->ioarcb;
6090 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6091 ioarcb->host_response_handle = cpu_to_be32(i << 2);
6092 ioarcb->write_ioadl_addr =
6093 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6094 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6095 ioarcb->ioasa_host_pci_addr =
6096 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6097 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6098 ipr_cmd->cmd_index = i;
6099 ipr_cmd->ioa_cfg = ioa_cfg;
6100 ipr_cmd->sense_buffer_dma = dma_addr +
6101 offsetof(struct ipr_cmnd, sense_buffer);
6102
6103 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6104 }
6105
6106 return 0;
6107 }
6108
6109 /**
6110 * ipr_alloc_mem - Allocate memory for an adapter
6111 * @ioa_cfg: ioa config struct
6112 *
6113 * Return value:
6114 * 0 on success / non-zero for error
6115 **/
6116 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6117 {
6118 struct pci_dev *pdev = ioa_cfg->pdev;
6119 int i, rc = -ENOMEM;
6120
6121 ENTER;
6122 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6123 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6124
6125 if (!ioa_cfg->res_entries)
6126 goto out;
6127
6128 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6129 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6130
6131 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6132 sizeof(struct ipr_misc_cbs),
6133 &ioa_cfg->vpd_cbs_dma);
6134
6135 if (!ioa_cfg->vpd_cbs)
6136 goto out_free_res_entries;
6137
6138 if (ipr_alloc_cmd_blks(ioa_cfg))
6139 goto out_free_vpd_cbs;
6140
6141 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6142 sizeof(u32) * IPR_NUM_CMD_BLKS,
6143 &ioa_cfg->host_rrq_dma);
6144
6145 if (!ioa_cfg->host_rrq)
6146 goto out_ipr_free_cmd_blocks;
6147
6148 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6149 sizeof(struct ipr_config_table),
6150 &ioa_cfg->cfg_table_dma);
6151
6152 if (!ioa_cfg->cfg_table)
6153 goto out_free_host_rrq;
6154
6155 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6156 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6157 sizeof(struct ipr_hostrcb),
6158 &ioa_cfg->hostrcb_dma[i]);
6159
6160 if (!ioa_cfg->hostrcb[i])
6161 goto out_free_hostrcb_dma;
6162
6163 ioa_cfg->hostrcb[i]->hostrcb_dma =
6164 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6165 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6166 }
6167
6168 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6169 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6170
6171 if (!ioa_cfg->trace)
6172 goto out_free_hostrcb_dma;
6173
6174 rc = 0;
6175 out:
6176 LEAVE;
6177 return rc;
6178
6179 out_free_hostrcb_dma:
6180 while (i-- > 0) {
6181 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6182 ioa_cfg->hostrcb[i],
6183 ioa_cfg->hostrcb_dma[i]);
6184 }
6185 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6186 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6187 out_free_host_rrq:
6188 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6189 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6190 out_ipr_free_cmd_blocks:
6191 ipr_free_cmd_blks(ioa_cfg);
6192 out_free_vpd_cbs:
6193 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6194 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6195 out_free_res_entries:
6196 kfree(ioa_cfg->res_entries);
6197 goto out;
6198 }
6199
6200 /**
6201 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6202 * @ioa_cfg: ioa config struct
6203 *
6204 * Return value:
6205 * none
6206 **/
6207 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6208 {
6209 int i;
6210
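/* The ipr_max_speed module parameter indexes ipr_max_bus_speeds[];
 * out-of-range values fall back to the U160 rate. */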
6211 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6212 ioa_cfg->bus_attr[i].bus = i;
6213 ioa_cfg->bus_attr[i].qas_enabled = 0;
6214 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6215 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6216 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6217 else
6218 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6219 }
6220 }
6221
6222 /**
6223 * ipr_init_ioa_cfg - Initialize IOA config struct
6224 * @ioa_cfg: ioa config struct
6225 * @host: scsi host struct
6226 * @pdev: PCI dev struct
6227 *
6228 * Return value:
6229 * none
6230 **/
6231 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6232 struct Scsi_Host *host, struct pci_dev *pdev)
6233 {
6234 const struct ipr_interrupt_offsets *p;
6235 struct ipr_interrupts *t;
6236 void __iomem *base;
6237
6238 ioa_cfg->host = host;
6239 ioa_cfg->pdev = pdev;
6240 ioa_cfg->log_level = ipr_log_level;
6241 ioa_cfg->doorbell = IPR_DOORBELL;
6242 if (!ipr_auto_create)
6243 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6244 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6245 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6246 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6247 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6248 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6249 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6250 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6251 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6252
6253 INIT_LIST_HEAD(&ioa_cfg->free_q);
6254 INIT_LIST_HEAD(&ioa_cfg->pending_q);
6255 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6256 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6257 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6258 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6259 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6260 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6261 ioa_cfg->sdt_state = INACTIVE;
6262 if (ipr_enable_cache)
6263 ioa_cfg->cache_state = CACHE_ENABLED;
6264 else
6265 ioa_cfg->cache_state = CACHE_DISABLED;
6266
6267 ipr_initialize_bus_attr(ioa_cfg);
6268
6269 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6270 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6271 host->max_channel = IPR_MAX_BUS_TO_SCAN;
6272 host->unique_id = host->host_no;
6273 host->max_cmd_len = IPR_MAX_CDB_LEN;
6274 pci_set_drvdata(pdev, ioa_cfg);
6275
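/* The chip config supplies per-chip register offsets; add them to the
 * mapped BAR to form the register addresses used by the driver. */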
6276 p = &ioa_cfg->chip_cfg->regs;
6277 t = &ioa_cfg->regs;
6278 base = ioa_cfg->hdw_dma_regs;
6279
6280 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6281 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6282 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6283 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6284 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6285 t->ioarrin_reg = base + p->ioarrin_reg;
6286 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6287 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6288 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6289 }
6290
6291 /**
6292 * ipr_get_chip_cfg - Find adapter chip configuration
6293 * @dev_id: PCI device id struct
6294 *
6295 * Return value:
6296 * ptr to chip config on success / NULL on failure
6297 **/
6298 static const struct ipr_chip_cfg_t * __devinit
6299 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6300 {
6301 int i;
6302
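/* Prefer a chip config attached as driver_data in the PCI id table;
 * otherwise match vendor/device against the ipr_chip[] table. */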
6303 if (dev_id->driver_data)
6304 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6305
6306 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6307 if (ipr_chip[i].vendor == dev_id->vendor &&
6308 ipr_chip[i].device == dev_id->device)
6309 return ipr_chip[i].cfg;
6310 return NULL;
6311 }
6312
6313 /**
6314 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6315 * @pdev: PCI device struct
6316 * @dev_id: PCI device id struct
6317 *
6318 * Return value:
6319 * 0 on success / non-zero on failure
6320 **/
6321 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6322 const struct pci_device_id *dev_id)
6323 {
6324 struct ipr_ioa_cfg *ioa_cfg;
6325 struct Scsi_Host *host;
6326 unsigned long ipr_regs_pci;
6327 void __iomem *ipr_regs;
6328 u32 rc = PCIBIOS_SUCCESSFUL;
6329 volatile u32 mask, uproc;
6330
6331 ENTER;
6332
6333 if ((rc = pci_enable_device(pdev))) {
6334 dev_err(&pdev->dev, "Cannot enable adapter\n");
6335 goto out;
6336 }
6337
6338 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6339
6340 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6341
6342 if (!host) {
6343 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6344 rc = -ENOMEM;
6345 goto out_disable;
6346 }
6347
6348 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6349 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6350
6351 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6352
6353 if (!ioa_cfg->chip_cfg) {
6354 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6355 dev_id->vendor, dev_id->device);
6356 goto out_scsi_host_put;
6357 }
6358
6359 ipr_regs_pci = pci_resource_start(pdev, 0);
6360
6361 rc = pci_request_regions(pdev, IPR_NAME);
6362 if (rc < 0) {
6363 dev_err(&pdev->dev,
6364 "Couldn't register memory range of registers\n");
6365 goto out_scsi_host_put;
6366 }
6367
6368 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6369
6370 if (!ipr_regs) {
6371 dev_err(&pdev->dev,
6372 "Couldn't map memory range of registers\n");
6373 rc = -ENOMEM;
6374 goto out_release_regions;
6375 }
6376
6377 ioa_cfg->hdw_dma_regs = ipr_regs;
6378 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6379 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6380
6381 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6382
6383 pci_set_master(pdev);
6384
6385 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6386 if (rc < 0) {
6387 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6388 goto cleanup_nomem;
6389 }
6390
6391 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6392 ioa_cfg->chip_cfg->cache_line_size);
6393
6394 if (rc != PCIBIOS_SUCCESSFUL) {
6395 dev_err(&pdev->dev, "Write of cache line size failed\n");
6396 rc = -EIO;
6397 goto cleanup_nomem;
6398 }
6399
6400 /* Save away PCI config space for use following IOA reset */
6401 rc = pci_save_state(pdev);
6402
6403 if (rc != PCIBIOS_SUCCESSFUL) {
6404 dev_err(&pdev->dev, "Failed to save PCI config space\n");
6405 rc = -EIO;
6406 goto cleanup_nomem;
6407 }
6408
6409 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6410 goto cleanup_nomem;
6411
6412 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6413 goto cleanup_nomem;
6414
6415 rc = ipr_alloc_mem(ioa_cfg);
6416 if (rc < 0) {
6417 dev_err(&pdev->dev,
6418 "Couldn't allocate enough memory for device driver!\n");
6419 goto cleanup_nomem;
6420 }
6421
6422 /*
6423 * If HRRQ updated interrupt is not masked, or reset alert is set,
6424 * the card is in an unknown state and needs a hard reset
6425 */
6426 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6427 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
6428 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
6429 ioa_cfg->needs_hard_reset = 1;
6430
6431 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6432 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6433
6434 if (rc) {
6435 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6436 pdev->irq, rc);
6437 goto cleanup_nolog;
6438 }
6439
6440 spin_lock(&ipr_driver_lock);
6441 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6442 spin_unlock(&ipr_driver_lock);
6443
6444 LEAVE;
6445 out:
6446 return rc;
6447
6448 cleanup_nolog:
6449 ipr_free_mem(ioa_cfg);
6450 cleanup_nomem:
6451 iounmap(ipr_regs);
6452 out_release_regions:
6453 pci_release_regions(pdev);
6454 out_scsi_host_put:
6455 scsi_host_put(host);
6456 out_disable:
6457 pci_disable_device(pdev);
6458 goto out;
6459 }
6460
6461 /**
6462 * ipr_scan_vsets - Scans for VSET devices
6463 * @ioa_cfg: ioa config struct
6464 *
6465 * Description: Since VSET resources do not follow SAM (LUNs may be sparse,
6466 * with no LUN 0), the normal host scan misses them, so we scan for them ourselves.
6467 *
6468 * Return value:
6469 * none
6470 **/
6471 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6472 {
6473 int target, lun;
6474
6475 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6476 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
6477 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6478 }
6479
6480 /**
6481 * ipr_initiate_ioa_bringdown - Bring down an adapter
6482 * @ioa_cfg: ioa config struct
6483 * @shutdown_type: shutdown type
6484 *
6485 * Description: This function will initiate bringing down the adapter.
6486 * This consists of issuing an IOA shutdown to the adapter
6487 * to flush the cache, and running BIST.
6488 * If the caller needs to wait on the completion of the reset,
6489 * the caller must sleep on the reset_wait_q.
6490 *
6491 * Return value:
6492 * none
6493 **/
6494 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6495 enum ipr_shutdown_type shutdown_type)
6496 {
6497 ENTER;
6498 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6499 ioa_cfg->sdt_state = ABORT_DUMP;
6500 ioa_cfg->reset_retries = 0;
6501 ioa_cfg->in_ioa_bringdown = 1;
6502 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6503 LEAVE;
6504 }
6505
6506 /**
6507 * __ipr_remove - Remove a single adapter
6508 * @pdev: pci device struct
6509 *
6510 * Adapter hot plug remove entry point.
6511 *
6512 * Return value:
6513 * none
6514 **/
6515 static void __ipr_remove(struct pci_dev *pdev)
6516 {
6517 unsigned long host_lock_flags = 0;
6518 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6519 ENTER;
6520
6521 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6522 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6523
6524 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6525 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6526 flush_scheduled_work();
6527 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6528
6529 spin_lock(&ipr_driver_lock);
6530 list_del(&ioa_cfg->queue);
6531 spin_unlock(&ipr_driver_lock);
6532
6533 if (ioa_cfg->sdt_state == ABORT_DUMP)
6534 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6535 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6536
6537 ipr_free_all_resources(ioa_cfg);
6538
6539 LEAVE;
6540 }
6541
6542 /**
6543 * ipr_remove - IOA hot plug remove entry point
6544 * @pdev: pci device struct
6545 *
6546 * Adapter hot plug remove entry point.
6547 *
6548 * Return value:
6549 * none
6550 **/
6551 static void ipr_remove(struct pci_dev *pdev)
6552 {
6553 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6554
6555 ENTER;
6556
6557 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6558 &ipr_trace_attr);
6559 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6560 &ipr_dump_attr);
6561 scsi_remove_host(ioa_cfg->host);
6562
6563 __ipr_remove(pdev);
6564
6565 LEAVE;
6566 }
6567
6568 /**
6569 * ipr_probe - Adapter hot plug add entry point
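 * @pdev: PCI device struct
 * @dev_id: PCI device id struct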
6570 *
6571 * Return value:
6572 * 0 on success / non-zero on failure
6573 **/
6574 static int __devinit ipr_probe(struct pci_dev *pdev,
6575 const struct pci_device_id *dev_id)
6576 {
6577 struct ipr_ioa_cfg *ioa_cfg;
6578 int rc;
6579
6580 rc = ipr_probe_ioa(pdev, dev_id);
6581
6582 if (rc)
6583 return rc;
6584
6585 ioa_cfg = pci_get_drvdata(pdev);
6586 rc = ipr_probe_ioa_part2(ioa_cfg);
6587
6588 if (rc) {
6589 __ipr_remove(pdev);
6590 return rc;
6591 }
6592
6593 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6594
6595 if (rc) {
6596 __ipr_remove(pdev);
6597 return rc;
6598 }
6599
6600 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6601 &ipr_trace_attr);
6602
6603 if (rc) {
6604 scsi_remove_host(ioa_cfg->host);
6605 __ipr_remove(pdev);
6606 return rc;
6607 }
6608
6609 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6610 &ipr_dump_attr);
6611
6612 if (rc) {
6613 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6614 &ipr_trace_attr);
6615 scsi_remove_host(ioa_cfg->host);
6616 __ipr_remove(pdev);
6617 return rc;
6618 }
6619
6620 scsi_scan_host(ioa_cfg->host);
6621 ipr_scan_vsets(ioa_cfg);
6622 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6623 ioa_cfg->allow_ml_add_del = 1;
6624 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6625 schedule_work(&ioa_cfg->work_q);
6626 return 0;
6627 }
6628
6629 /**
6630 * ipr_shutdown - Shutdown handler.
6631 * @pdev: pci device struct
6632 *
6633 * This function is invoked upon system shutdown/reboot. It issues
6634 * a shutdown to the adapter to flush its write cache.
6635 *
6636 * Return value:
6637 * none
6638 **/
6639 static void ipr_shutdown(struct pci_dev *pdev)
6640 {
6641 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6642 unsigned long lock_flags = 0;
6643
6644 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6645 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6647 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6648 }
6649
6650 static struct pci_device_id ipr_pci_table[] __devinitdata = {
6651 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6652 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6653 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6654 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6655 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6656 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6657 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6658 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6659 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6660 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6661 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6662 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6663 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6664 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6665 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6666 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6667 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6668 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6669 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6670 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6671 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6672 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
6674 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6675 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6676 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6677 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6678 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6680 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6681 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6682 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6683 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6684 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6686 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6687 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6688 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6689 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6690 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6692 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6693 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6694 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
6695 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6696 { }
6697 };
6698 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6699
6700 static struct pci_error_handlers ipr_err_handler = {
6701 .error_detected = ipr_pci_error_detected,
6702 .slot_reset = ipr_pci_slot_reset,
6703 };
6704
6705 static struct pci_driver ipr_driver = {
6706 .name = IPR_NAME,
6707 .id_table = ipr_pci_table,
6708 .probe = ipr_probe,
6709 .remove = ipr_remove,
6710 .shutdown = ipr_shutdown,
6711 .err_handler = &ipr_err_handler,
6712 };
6713
6714 /**
6715 * ipr_init - Module entry point
6716 *
6717 * Return value:
6718 * 0 on success / negative value on failure
6719 **/
6720 static int __init ipr_init(void)
6721 {
6722 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6723 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6724
6725 return pci_module_init(&ipr_driver);
6726 }
6727
6728 /**
6729 * ipr_exit - Module unload
6730 *
6731 * Module unload entry point.
6732 *
6733 * Return value:
6734 * none
6735 **/
6736 static void __exit ipr_exit(void)
6737 {
6738 pci_unregister_driver(&ipr_driver);
6739 }
6740
6741 module_init(ipr_init);
6742 module_exit(ipr_exit);