1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57#include <linux/config.h>
58#include <linux/fs.h>
59#include <linux/init.h>
60#include <linux/types.h>
61#include <linux/errno.h>
62#include <linux/kernel.h>
63#include <linux/ioport.h>
64#include <linux/delay.h>
65#include <linux/pci.h>
66#include <linux/wait.h>
67#include <linux/spinlock.h>
68#include <linux/sched.h>
69#include <linux/interrupt.h>
70#include <linux/blkdev.h>
71#include <linux/firmware.h>
72#include <linux/module.h>
73#include <linux/moduleparam.h>
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/processor.h>
77#include <scsi/scsi.h>
78#include <scsi/scsi_host.h>
79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h>
82#include <scsi/scsi_request.h>
83#include "ipr.h"
84
85/*
86 * Global Data
87 */
88static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
89static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90static unsigned int ipr_max_speed = 1;
91static int ipr_testmode = 0;
92static unsigned int ipr_fastfail = 0;
93static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
 94static unsigned int ipr_enable_cache = 1;
95static DEFINE_SPINLOCK(ipr_driver_lock);
96
97/* This table describes the differences between DMA controller chips */
98static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
99 { /* Gemstone and Citrine */
100 .mailbox = 0x0042C,
101 .cache_line_size = 0x20,
102 {
103 .set_interrupt_mask_reg = 0x0022C,
104 .clr_interrupt_mask_reg = 0x00230,
105 .sense_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_reg = 0x00228,
107 .sense_interrupt_reg = 0x00224,
108 .ioarrin_reg = 0x00404,
109 .sense_uproc_interrupt_reg = 0x00214,
110 .set_uproc_interrupt_reg = 0x00214,
111 .clr_uproc_interrupt_reg = 0x00218
112 }
113 },
114 { /* Snipe and Scamp */
115 .mailbox = 0x0052C,
116 .cache_line_size = 0x20,
117 {
118 .set_interrupt_mask_reg = 0x00288,
119 .clr_interrupt_mask_reg = 0x0028C,
120 .sense_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_reg = 0x00284,
122 .sense_interrupt_reg = 0x00280,
123 .ioarrin_reg = 0x00504,
124 .sense_uproc_interrupt_reg = 0x00290,
125 .set_uproc_interrupt_reg = 0x00290,
126 .clr_uproc_interrupt_reg = 0x00294
127 }
128 },
129};
130
131static const struct ipr_chip_t ipr_chip[] = {
132 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
134 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
135 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
136};
137
138static int ipr_max_bus_speeds [] = {
139 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
140};
141
142MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
143MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
144module_param_named(max_speed, ipr_max_speed, uint, 0);
145MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
146module_param_named(log_level, ipr_log_level, uint, 0);
147MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
148module_param_named(testmode, ipr_testmode, int, 0);
149MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
150module_param_named(fastfail, ipr_fastfail, int, 0);
151MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
152module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
153MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
154module_param_named(enable_cache, ipr_enable_cache, int, 0);
155MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
156MODULE_LICENSE("GPL");
157MODULE_VERSION(IPR_DRIVER_VERSION);
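/*
 * Illustrative usage (not part of the original source): the options above are
 * load-time module parameters, so a hypothetical invocation such as
 *
 *     modprobe ipr max_speed=2 log_level=2 enable_cache=0
 *
 * would permit U320 negotiation, increase driver verbosity, and disable the
 * adapter's non-volatile write cache for that load of the module.
 */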
158
159static const char *ipr_gpdd_dev_end_states[] = {
160 "Command complete",
161 "Terminated by host",
162 "Terminated by device reset",
163 "Terminated by bus reset",
164 "Unknown",
165 "Command not started"
166};
167
168static const char *ipr_gpdd_dev_bus_phases[] = {
169 "Bus free",
170 "Arbitration",
171 "Selection",
172 "Message out",
173 "Command",
174 "Message in",
175 "Data out",
176 "Data in",
177 "Status",
178 "Reselection",
179 "Unknown"
180};
181
182/* A constant array of IOASCs/URCs/Error Messages */
183static const
184struct ipr_error_table_t ipr_error_table[] = {
185 {0x00000000, 1, 1,
186 "8155: An unknown error was received"},
187 {0x00330000, 0, 0,
188 "Soft underlength error"},
189 {0x005A0000, 0, 0,
190 "Command to be cancelled not found"},
191 {0x00808000, 0, 0,
192 "Qualified success"},
193 {0x01080000, 1, 1,
194 "FFFE: Soft device bus error recovered by the IOA"},
195 {0x01170600, 0, 1,
196 "FFF9: Device sector reassign successful"},
197 {0x01170900, 0, 1,
198 "FFF7: Media error recovered by device rewrite procedures"},
199 {0x01180200, 0, 1,
200 "7001: IOA sector reassignment successful"},
201 {0x01180500, 0, 1,
202 "FFF9: Soft media error. Sector reassignment recommended"},
203 {0x01180600, 0, 1,
204 "FFF7: Media error recovered by IOA rewrite procedures"},
205 {0x01418000, 0, 1,
206 "FF3D: Soft PCI bus error recovered by the IOA"},
207 {0x01440000, 1, 1,
208 "FFF6: Device hardware error recovered by the IOA"},
209 {0x01448100, 0, 1,
210 "FFF6: Device hardware error recovered by the device"},
211 {0x01448200, 1, 1,
212 "FF3D: Soft IOA error recovered by the IOA"},
213 {0x01448300, 0, 1,
214 "FFFA: Undefined device response recovered by the IOA"},
215 {0x014A0000, 1, 1,
216 "FFF6: Device bus error, message or command phase"},
217 {0x015D0000, 0, 1,
218 "FFF6: Failure prediction threshold exceeded"},
219 {0x015D9200, 0, 1,
220 "8009: Impending cache battery pack failure"},
221 {0x02040400, 0, 0,
222 "34FF: Disk device format in progress"},
223 {0x023F0000, 0, 0,
224 "Synchronization required"},
225 {0x024E0000, 0, 0,
226 "No ready, IOA shutdown"},
227 {0x025A0000, 0, 0,
228 "Not ready, IOA has been shutdown"},
229 {0x02670100, 0, 1,
230 "3020: Storage subsystem configuration error"},
231 {0x03110B00, 0, 0,
232 "FFF5: Medium error, data unreadable, recommend reassign"},
233 {0x03110C00, 0, 0,
234 "7000: Medium error, data unreadable, do not reassign"},
235 {0x03310000, 0, 1,
236 "FFF3: Disk media format bad"},
237 {0x04050000, 0, 1,
238 "3002: Addressed device failed to respond to selection"},
239 {0x04080000, 1, 1,
240 "3100: Device bus error"},
241 {0x04080100, 0, 1,
242 "3109: IOA timed out a device command"},
243 {0x04088000, 0, 0,
244 "3120: SCSI bus is not operational"},
245 {0x04118000, 0, 1,
246 "9000: IOA reserved area data check"},
247 {0x04118100, 0, 1,
248 "9001: IOA reserved area invalid data pattern"},
249 {0x04118200, 0, 1,
250 "9002: IOA reserved area LRC error"},
251 {0x04320000, 0, 1,
252 "102E: Out of alternate sectors for disk storage"},
253 {0x04330000, 1, 1,
254 "FFF4: Data transfer underlength error"},
255 {0x04338000, 1, 1,
256 "FFF4: Data transfer overlength error"},
257 {0x043E0100, 0, 1,
258 "3400: Logical unit failure"},
259 {0x04408500, 0, 1,
260 "FFF4: Device microcode is corrupt"},
261 {0x04418000, 1, 1,
262 "8150: PCI bus error"},
263 {0x04430000, 1, 0,
264 "Unsupported device bus message received"},
265 {0x04440000, 1, 1,
266 "FFF4: Disk device problem"},
267 {0x04448200, 1, 1,
268 "8150: Permanent IOA failure"},
269 {0x04448300, 0, 1,
270 "3010: Disk device returned wrong response to IOA"},
271 {0x04448400, 0, 1,
272 "8151: IOA microcode error"},
273 {0x04448500, 0, 0,
274 "Device bus status error"},
275 {0x04448600, 0, 1,
276 "8157: IOA error requiring IOA reset to recover"},
277 {0x04490000, 0, 0,
278 "Message reject received from the device"},
279 {0x04449200, 0, 1,
280 "8008: A permanent cache battery pack failure occurred"},
281 {0x0444A000, 0, 1,
282 "9090: Disk unit has been modified after the last known status"},
283 {0x0444A200, 0, 1,
284 "9081: IOA detected device error"},
285 {0x0444A300, 0, 1,
286 "9082: IOA detected device error"},
287 {0x044A0000, 1, 1,
288 "3110: Device bus error, message or command phase"},
289 {0x04670400, 0, 1,
290 "9091: Incorrect hardware configuration change has been detected"},
291 {0x046E0000, 0, 1,
292 "FFF4: Command to logical unit failed"},
293 {0x05240000, 1, 0,
294 "Illegal request, invalid request type or request packet"},
295 {0x05250000, 0, 0,
296 "Illegal request, invalid resource handle"},
297 {0x05260000, 0, 0,
298 "Illegal request, invalid field in parameter list"},
299 {0x05260100, 0, 0,
300 "Illegal request, parameter not supported"},
301 {0x05260200, 0, 0,
302 "Illegal request, parameter value invalid"},
303 {0x052C0000, 0, 0,
304 "Illegal request, command sequence error"},
305 {0x06040500, 0, 1,
306 "9031: Array protection temporarily suspended, protection resuming"},
307 {0x06040600, 0, 1,
308 "9040: Array protection temporarily suspended, protection resuming"},
309 {0x06290000, 0, 1,
310 "FFFB: SCSI bus was reset"},
311 {0x06290500, 0, 0,
312 "FFFE: SCSI bus transition to single ended"},
313 {0x06290600, 0, 0,
314 "FFFE: SCSI bus transition to LVD"},
315 {0x06298000, 0, 1,
316 "FFFB: SCSI bus was reset by another initiator"},
317 {0x063F0300, 0, 1,
318 "3029: A device replacement has occurred"},
319 {0x064C8000, 0, 1,
320 "9051: IOA cache data exists for a missing or failed device"},
321 {0x06670100, 0, 1,
322 "9025: Disk unit is not supported at its physical location"},
323 {0x06670600, 0, 1,
324 "3020: IOA detected a SCSI bus configuration error"},
325 {0x06678000, 0, 1,
326 "3150: SCSI bus configuration error"},
327 {0x06690200, 0, 1,
328 "9041: Array protection temporarily suspended"},
329 {0x06698200, 0, 1,
330 "9042: Corrupt array parity detected on specified device"},
331 {0x066B0200, 0, 1,
332 "9030: Array no longer protected due to missing or failed disk unit"},
333 {0x066B8200, 0, 1,
334 "9032: Array exposed but still protected"},
335 {0x07270000, 0, 0,
336 "Failure due to other device"},
337 {0x07278000, 0, 1,
338 "9008: IOA does not support functions expected by devices"},
339 {0x07278100, 0, 1,
340 "9010: Cache data associated with attached devices cannot be found"},
341 {0x07278200, 0, 1,
342 "9011: Cache data belongs to devices other than those attached"},
343 {0x07278400, 0, 1,
344 "9020: Array missing 2 or more devices with only 1 device present"},
345 {0x07278500, 0, 1,
346 "9021: Array missing 2 or more devices with 2 or more devices present"},
347 {0x07278600, 0, 1,
348 "9022: Exposed array is missing a required device"},
349 {0x07278700, 0, 1,
350 "9023: Array member(s) not at required physical locations"},
351 {0x07278800, 0, 1,
352 "9024: Array not functional due to present hardware configuration"},
353 {0x07278900, 0, 1,
354 "9026: Array not functional due to present hardware configuration"},
355 {0x07278A00, 0, 1,
356 "9027: Array is missing a device and parity is out of sync"},
357 {0x07278B00, 0, 1,
358 "9028: Maximum number of arrays already exist"},
359 {0x07278C00, 0, 1,
360 "9050: Required cache data cannot be located for a disk unit"},
361 {0x07278D00, 0, 1,
362 "9052: Cache data exists for a device that has been modified"},
363 {0x07278F00, 0, 1,
364 "9054: IOA resources not available due to previous problems"},
365 {0x07279100, 0, 1,
366 "9092: Disk unit requires initialization before use"},
367 {0x07279200, 0, 1,
368 "9029: Incorrect hardware configuration change has been detected"},
369 {0x07279600, 0, 1,
370 "9060: One or more disk pairs are missing from an array"},
371 {0x07279700, 0, 1,
372 "9061: One or more disks are missing from an array"},
373 {0x07279800, 0, 1,
374 "9062: One or more disks are missing from an array"},
375 {0x07279900, 0, 1,
376 "9063: Maximum number of functional arrays has been exceeded"},
377 {0x0B260000, 0, 0,
378 "Aborted command, invalid descriptor"},
379 {0x0B5A0000, 0, 0,
380 "Command terminated by host"}
381};
382
383static const struct ipr_ses_table_entry ipr_ses_table[] = {
384 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
385 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
386 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
387 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
388 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
389 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
390 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
391 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
392 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
393 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
394 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
395 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
396 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
397};
398
399/*
400 * Function Prototypes
401 */
402static int ipr_reset_alert(struct ipr_cmnd *);
403static void ipr_process_ccn(struct ipr_cmnd *);
404static void ipr_process_error(struct ipr_cmnd *);
405static void ipr_reset_ioa_job(struct ipr_cmnd *);
406static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
407 enum ipr_shutdown_type);
408
409#ifdef CONFIG_SCSI_IPR_TRACE
410/**
411 * ipr_trc_hook - Add a trace entry to the driver trace
412 * @ipr_cmd: ipr command struct
413 * @type: trace type
414 * @add_data: additional data
415 *
416 * Return value:
417 * none
418 **/
419static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
420 u8 type, u32 add_data)
421{
422 struct ipr_trace_entry *trace_entry;
423 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
424
425 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
426 trace_entry->time = jiffies;
427 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
428 trace_entry->type = type;
429 trace_entry->cmd_index = ipr_cmd->cmd_index;
430 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
431 trace_entry->u.add_data = add_data;
432}
433#else
434#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
435#endif
436
437/**
438 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
439 * @ipr_cmd: ipr command struct
440 *
441 * Return value:
442 * none
443 **/
444static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
445{
446 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
447 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
448
449 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
450 ioarcb->write_data_transfer_length = 0;
451 ioarcb->read_data_transfer_length = 0;
452 ioarcb->write_ioadl_len = 0;
453 ioarcb->read_ioadl_len = 0;
454 ioasa->ioasc = 0;
455 ioasa->residual_data_len = 0;
456
457 ipr_cmd->scsi_cmd = NULL;
458 ipr_cmd->sense_buffer[0] = 0;
459 ipr_cmd->dma_use_sg = 0;
460}
461
462/**
463 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
464 * @ipr_cmd: ipr command struct
465 *
466 * Return value:
467 * none
468 **/
469static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
470{
471 ipr_reinit_ipr_cmnd(ipr_cmd);
472 ipr_cmd->u.scratch = 0;
473 ipr_cmd->sibling = NULL;
474 init_timer(&ipr_cmd->timer);
475}
476
477/**
478 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
479 * @ioa_cfg: ioa config struct
480 *
481 * Return value:
482 * pointer to ipr command struct
483 **/
484static
485struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
486{
487 struct ipr_cmnd *ipr_cmd;
488
489 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
490 list_del(&ipr_cmd->queue);
491 ipr_init_ipr_cmnd(ipr_cmd);
492
493 return ipr_cmd;
494}
495
496/**
497 * ipr_unmap_sglist - Unmap scatterlist if mapped
498 * @ioa_cfg: ioa config struct
499 * @ipr_cmd: ipr command struct
500 *
501 * Return value:
502 * nothing
503 **/
504static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
505 struct ipr_cmnd *ipr_cmd)
506{
507 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
508
509 if (ipr_cmd->dma_use_sg) {
510 if (scsi_cmd->use_sg > 0) {
511 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
512 scsi_cmd->use_sg,
513 scsi_cmd->sc_data_direction);
514 } else {
515 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
516 scsi_cmd->request_bufflen,
517 scsi_cmd->sc_data_direction);
518 }
519 }
520}
521
522/**
523 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
524 * @ioa_cfg: ioa config struct
525 * @clr_ints: interrupts to clear
526 *
527 * This function masks all interrupts on the adapter, then clears the
528 * interrupts specified in the mask
529 *
530 * Return value:
531 * none
532 **/
533static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
534 u32 clr_ints)
535{
536 volatile u32 int_reg;
537
538 /* Stop new interrupts */
539 ioa_cfg->allow_interrupts = 0;
540
541 /* Set interrupt mask to stop all new interrupts */
542 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
543
544 /* Clear any pending interrupts */
545 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
546 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
547}
548
549/**
550 * ipr_save_pcix_cmd_reg - Save PCI-X command register
551 * @ioa_cfg: ioa config struct
552 *
553 * Return value:
554 * 0 on success / -EIO on failure
555 **/
556static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
557{
558 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
559
560 if (pcix_cmd_reg == 0) {
561 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
562 return -EIO;
563 }
564
565 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
566 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
567 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
568 return -EIO;
569 }
570
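 /* Keep data parity error recovery and relaxed ordering enabled when the
    saved value is written back later by ipr_set_pcix_cmd_reg() */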
571 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
572 return 0;
573}
574
575/**
576 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
577 * @ioa_cfg: ioa config struct
578 *
579 * Return value:
580 * 0 on success / -EIO on failure
581 **/
582static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
583{
584 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
585
586 if (pcix_cmd_reg) {
587 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
588 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
589 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
590 return -EIO;
591 }
592 } else {
593 dev_err(&ioa_cfg->pdev->dev,
594 "Failed to setup PCI-X command register\n");
595 return -EIO;
596 }
597
598 return 0;
599}
600
601/**
602 * ipr_scsi_eh_done - mid-layer done function for aborted ops
603 * @ipr_cmd: ipr command struct
604 *
605 * This function is invoked by the interrupt handler for
606 * ops generated by the SCSI mid-layer which are being aborted.
607 *
608 * Return value:
609 * none
610 **/
611static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
612{
613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
614 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
615
616 scsi_cmd->result |= (DID_ERROR << 16);
617
618 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
619 scsi_cmd->scsi_done(scsi_cmd);
620 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
621}
622
623/**
624 * ipr_fail_all_ops - Fails all outstanding ops.
625 * @ioa_cfg: ioa config struct
626 *
627 * This function fails all outstanding ops.
628 *
629 * Return value:
630 * none
631 **/
632static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
633{
634 struct ipr_cmnd *ipr_cmd, *temp;
635
636 ENTER;
637 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
638 list_del(&ipr_cmd->queue);
639
640 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
641 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
642
643 if (ipr_cmd->scsi_cmd)
644 ipr_cmd->done = ipr_scsi_eh_done;
645
646 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
647 del_timer(&ipr_cmd->timer);
648 ipr_cmd->done(ipr_cmd);
649 }
650
651 LEAVE;
652}
653
654/**
655 * ipr_do_req - Send driver initiated requests.
656 * @ipr_cmd: ipr command struct
657 * @done: done function
658 * @timeout_func: timeout function
659 * @timeout: timeout value
660 *
661 * This function sends the specified command to the adapter with the
662 * timeout given. The done function is invoked on command completion.
663 *
664 * Return value:
665 * none
666 **/
667static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
668 void (*done) (struct ipr_cmnd *),
669 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
670{
671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
672
673 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
674
675 ipr_cmd->done = done;
676
677 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
678 ipr_cmd->timer.expires = jiffies + timeout;
679 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
680
681 add_timer(&ipr_cmd->timer);
682
683 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
684
685 mb();
686 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
687 ioa_cfg->regs.ioarrin_reg);
688}
689
690/**
691 * ipr_internal_cmd_done - Op done function for an internally generated op.
692 * @ipr_cmd: ipr command struct
693 *
694 * This function is the op done function for an internally generated,
695 * blocking op. It simply wakes the sleeping thread.
696 *
697 * Return value:
698 * none
699 **/
700static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
701{
702 if (ipr_cmd->sibling)
703 ipr_cmd->sibling = NULL;
704 else
705 complete(&ipr_cmd->completion);
706}
707
708/**
709 * ipr_send_blocking_cmd - Send command and sleep on its completion.
710 * @ipr_cmd: ipr command struct
711 * @timeout_func: function to invoke if command times out
712 * @timeout: timeout
713 *
714 * Return value:
715 * none
716 **/
717static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
718 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
719 u32 timeout)
720{
721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
722
723 init_completion(&ipr_cmd->completion);
724 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
725
726 spin_unlock_irq(ioa_cfg->host->host_lock);
727 wait_for_completion(&ipr_cmd->completion);
728 spin_lock_irq(ioa_cfg->host->host_lock);
729}
730
731/**
732 * ipr_send_hcam - Send an HCAM to the adapter.
733 * @ioa_cfg: ioa config struct
734 * @type: HCAM type
735 * @hostrcb: hostrcb struct
736 *
737 * This function will send a Host Controlled Async command to the adapter.
738 * If HCAMs are currently not allowed to be issued to the adapter, it will
739 * place the hostrcb on the free queue.
740 *
741 * Return value:
742 * none
743 **/
744static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
745 struct ipr_hostrcb *hostrcb)
746{
747 struct ipr_cmnd *ipr_cmd;
748 struct ipr_ioarcb *ioarcb;
749
750 if (ioa_cfg->allow_cmds) {
751 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
752 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
753 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
754
755 ipr_cmd->u.hostrcb = hostrcb;
756 ioarcb = &ipr_cmd->ioarcb;
757
758 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
759 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
760 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
761 ioarcb->cmd_pkt.cdb[1] = type;
762 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
763 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
764
765 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
766 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
767 ipr_cmd->ioadl[0].flags_and_data_len =
768 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
769 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
770
771 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
772 ipr_cmd->done = ipr_process_ccn;
773 else
774 ipr_cmd->done = ipr_process_error;
775
776 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
777
778 mb();
779 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
780 ioa_cfg->regs.ioarrin_reg);
781 } else {
782 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
783 }
784}
785
786/**
787 * ipr_init_res_entry - Initialize a resource entry struct.
788 * @res: resource entry struct
789 *
790 * Return value:
791 * none
792 **/
793static void ipr_init_res_entry(struct ipr_resource_entry *res)
794{
795 res->needs_sync_complete = 1;
796 res->in_erp = 0;
797 res->add_to_ml = 0;
798 res->del_from_ml = 0;
799 res->resetting_device = 0;
800 res->sdev = NULL;
801}
802
803/**
804 * ipr_handle_config_change - Handle a config change from the adapter
805 * @ioa_cfg: ioa config struct
806 * @hostrcb: hostrcb
807 *
808 * Return value:
809 * none
810 **/
811static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
812 struct ipr_hostrcb *hostrcb)
813{
814 struct ipr_resource_entry *res = NULL;
815 struct ipr_config_table_entry *cfgte;
816 u32 is_ndn = 1;
817
818 cfgte = &hostrcb->hcam.u.ccn.cfgte;
819
820 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
821 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
822 sizeof(cfgte->res_addr))) {
823 is_ndn = 0;
824 break;
825 }
826 }
827
828 if (is_ndn) {
829 if (list_empty(&ioa_cfg->free_res_q)) {
830 ipr_send_hcam(ioa_cfg,
831 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
832 hostrcb);
833 return;
834 }
835
836 res = list_entry(ioa_cfg->free_res_q.next,
837 struct ipr_resource_entry, queue);
838
839 list_del(&res->queue);
840 ipr_init_res_entry(res);
841 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
842 }
843
844 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
845
846 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
847 if (res->sdev) {
848 res->sdev->hostdata = NULL;
849 res->del_from_ml = 1;
850 if (ioa_cfg->allow_ml_add_del)
851 schedule_work(&ioa_cfg->work_q);
852 } else
853 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
854 } else if (!res->sdev) {
855 res->add_to_ml = 1;
856 if (ioa_cfg->allow_ml_add_del)
857 schedule_work(&ioa_cfg->work_q);
858 }
859
860 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
861}
862
863/**
864 * ipr_process_ccn - Op done function for a CCN.
865 * @ipr_cmd: ipr command struct
866 *
867 * This function is the op done function for a configuration
868 * change notification host controlled async from the adapter.
869 *
870 * Return value:
871 * none
872 **/
873static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
874{
875 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
876 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
877 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
878
879 list_del(&hostrcb->queue);
880 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
881
882 if (ioasc) {
883 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
884 dev_err(&ioa_cfg->pdev->dev,
885 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
886
887 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
888 } else {
889 ipr_handle_config_change(ioa_cfg, hostrcb);
890 }
891}
892
893/**
894 * ipr_log_vpd - Log the passed VPD to the error log.
 895 * @vpd: vendor/product id/sn struct
896 *
897 * Return value:
898 * none
899 **/
 900static void ipr_log_vpd(struct ipr_vpd *vpd)
901{
902 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
903 + IPR_SERIAL_NUM_LEN];
904
905 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
906 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
907 IPR_PROD_ID_LEN);
908 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
909 ipr_err("Vendor/Product ID: %s\n", buffer);
910
 911 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
912 buffer[IPR_SERIAL_NUM_LEN] = '\0';
913 ipr_err(" Serial Number: %s\n", buffer);
914}
915
916/**
917 * ipr_log_cache_error - Log a cache error.
918 * @ioa_cfg: ioa config struct
919 * @hostrcb: hostrcb struct
920 *
921 * Return value:
922 * none
923 **/
924static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
925 struct ipr_hostrcb *hostrcb)
926{
927 struct ipr_hostrcb_type_02_error *error =
928 &hostrcb->hcam.u.error.u.type_02_error;
929
930 ipr_err("-----Current Configuration-----\n");
931 ipr_err("Cache Directory Card Information:\n");
 932 ipr_log_vpd(&error->ioa_vpd);
 933 ipr_err("Adapter Card Information:\n");
 934 ipr_log_vpd(&error->cfc_vpd);
935
936 ipr_err("-----Expected Configuration-----\n");
937 ipr_err("Cache Directory Card Information:\n");
 938 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
 939 ipr_err("Adapter Card Information:\n");
 940 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
941
942 ipr_err("Additional IOA Data: %08X %08X %08X\n",
943 be32_to_cpu(error->ioa_data[0]),
944 be32_to_cpu(error->ioa_data[1]),
945 be32_to_cpu(error->ioa_data[2]));
946}
947
948/**
949 * ipr_log_config_error - Log a configuration error.
950 * @ioa_cfg: ioa config struct
951 * @hostrcb: hostrcb struct
952 *
953 * Return value:
954 * none
955 **/
956static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
957 struct ipr_hostrcb *hostrcb)
958{
959 int errors_logged, i;
960 struct ipr_hostrcb_device_data_entry *dev_entry;
961 struct ipr_hostrcb_type_03_error *error;
962
963 error = &hostrcb->hcam.u.error.u.type_03_error;
964 errors_logged = be32_to_cpu(error->errors_logged);
965
966 ipr_err("Device Errors Detected/Logged: %d/%d\n",
967 be32_to_cpu(error->errors_detected), errors_logged);
968
 969 dev_entry = error->dev;
970
971 for (i = 0; i < errors_logged; i++, dev_entry++) {
972 ipr_err_separator;
973
 974 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
 975 ipr_log_vpd(&dev_entry->vpd);
976
977 ipr_err("-----New Device Information-----\n");
 978 ipr_log_vpd(&dev_entry->new_vpd);
979
980 ipr_err("Cache Directory Card Information:\n");
 981 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
982
983 ipr_err("Adapter Card Information:\n");
 984 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
985
986 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
987 be32_to_cpu(dev_entry->ioa_data[0]),
988 be32_to_cpu(dev_entry->ioa_data[1]),
989 be32_to_cpu(dev_entry->ioa_data[2]),
990 be32_to_cpu(dev_entry->ioa_data[3]),
991 be32_to_cpu(dev_entry->ioa_data[4]));
992 }
993}
994
995/**
996 * ipr_log_array_error - Log an array configuration error.
997 * @ioa_cfg: ioa config struct
998 * @hostrcb: hostrcb struct
999 *
1000 * Return value:
1001 * none
1002 **/
1003static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1004 struct ipr_hostrcb *hostrcb)
1005{
1006 int i;
1007 struct ipr_hostrcb_type_04_error *error;
1008 struct ipr_hostrcb_array_data_entry *array_entry;
1009 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1010
1011 error = &hostrcb->hcam.u.error.u.type_04_error;
1012
1013 ipr_err_separator;
1014
1015 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1016 error->protection_level,
1017 ioa_cfg->host->host_no,
1018 error->last_func_vset_res_addr.bus,
1019 error->last_func_vset_res_addr.target,
1020 error->last_func_vset_res_addr.lun);
1021
1022 ipr_err_separator;
1023
1024 array_entry = error->array_member;
1025
1026 for (i = 0; i < 18; i++) {
 1027 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1028 continue;
1029
 1030 if (be32_to_cpu(error->exposed_mode_adn) == i)
 1031 ipr_err("Exposed Array Member %d:\n", i);
 1032 else
 1033 ipr_err("Array Member %d:\n", i);
 1034
 1035 ipr_log_vpd(&array_entry->vpd);
 1036
1037 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1038 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1039 "Expected Location");
1040
1041 ipr_err_separator;
1042
1043 if (i == 9)
1044 array_entry = error->array_member2;
1045 else
1046 array_entry++;
1047 }
1048}
1049
1050/**
1051 * ipr_log_generic_error - Log an adapter error.
1052 * @ioa_cfg: ioa config struct
1053 * @hostrcb: hostrcb struct
1054 *
1055 * Return value:
1056 * none
1057 **/
1058static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1059 struct ipr_hostrcb *hostrcb)
1060{
1061 int i;
1062 int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);
1063
1064 if (ioa_data_len == 0)
1065 return;
1066
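 /* Hex dump of the raw HCAM data: each line prints the starting byte offset
    followed by four big-endian 32-bit words */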
1067 for (i = 0; i < ioa_data_len / 4; i += 4) {
1068 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1069 be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
1070 be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
1071 be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
1072 be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
1073 }
1074}
1075
1076/**
 1077 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1078 * @ioasc: IOASC
1079 *
 1080 * This function will return the index into the ipr_error_table
1081 * for the specified IOASC. If the IOASC is not in the table,
1082 * 0 will be returned, which points to the entry used for unknown errors.
1083 *
1084 * Return value:
1085 * index into the ipr_error_table
1086 **/
1087static u32 ipr_get_error(u32 ioasc)
1088{
1089 int i;
1090
1091 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1092 if (ipr_error_table[i].ioasc == ioasc)
1093 return i;
1094
1095 return 0;
1096}
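/*
 * Worked example (illustrative, not in the original source): with the
 * ipr_error_table above, ipr_get_error(0x04448200) returns the index of the
 * "8150: Permanent IOA failure" entry, while an IOASC that has no table entry
 * falls through to index 0, the "8155: An unknown error was received" entry.
 */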
1097
1098/**
1099 * ipr_handle_log_data - Log an adapter error.
1100 * @ioa_cfg: ioa config struct
1101 * @hostrcb: hostrcb struct
1102 *
1103 * This function logs an adapter error to the system.
1104 *
1105 * Return value:
1106 * none
1107 **/
1108static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1109 struct ipr_hostrcb *hostrcb)
1110{
1111 u32 ioasc;
1112 int error_index;
1113
1114 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1115 return;
1116
1117 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1118 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1119
1120 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1121
1122 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1123 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1124 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1125 scsi_report_bus_reset(ioa_cfg->host,
1126 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1127 }
1128
1129 error_index = ipr_get_error(ioasc);
1130
1131 if (!ipr_error_table[error_index].log_hcam)
1132 return;
1133
1134 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1135 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1136 "%s\n", ipr_error_table[error_index].error);
1137 } else {
1138 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1139 ipr_error_table[error_index].error);
1140 }
1141
1142 /* Set indication we have logged an error */
1143 ioa_cfg->errors_logged++;
1144
1145 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1146 return;
1147 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1148 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1149
1150 switch (hostrcb->hcam.overlay_id) {
1151 case IPR_HOST_RCB_OVERLAY_ID_2:
1152 ipr_log_cache_error(ioa_cfg, hostrcb);
1153 break;
1154 case IPR_HOST_RCB_OVERLAY_ID_3:
1155 ipr_log_config_error(ioa_cfg, hostrcb);
1156 break;
1157 case IPR_HOST_RCB_OVERLAY_ID_4:
1158 case IPR_HOST_RCB_OVERLAY_ID_6:
1159 ipr_log_array_error(ioa_cfg, hostrcb);
1160 break;
 1161 case IPR_HOST_RCB_OVERLAY_ID_1:
 1162 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 1163 default:
 1164 ipr_log_generic_error(ioa_cfg, hostrcb);
1165 break;
1166 }
1167}
1168
1169/**
1170 * ipr_process_error - Op done function for an adapter error log.
1171 * @ipr_cmd: ipr command struct
1172 *
1173 * This function is the op done function for an error log host
1174 * controlled async from the adapter. It will log the error and
1175 * send the HCAM back to the adapter.
1176 *
1177 * Return value:
1178 * none
1179 **/
1180static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1181{
1182 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1183 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1184 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1185
1186 list_del(&hostrcb->queue);
1187 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1188
1189 if (!ioasc) {
1190 ipr_handle_log_data(ioa_cfg, hostrcb);
1191 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1192 dev_err(&ioa_cfg->pdev->dev,
1193 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1194 }
1195
1196 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1197}
1198
1199/**
1200 * ipr_timeout - An internally generated op has timed out.
1201 * @ipr_cmd: ipr command struct
1202 *
1203 * This function blocks host requests and initiates an
1204 * adapter reset.
1205 *
1206 * Return value:
1207 * none
1208 **/
1209static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1210{
1211 unsigned long lock_flags = 0;
1212 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1213
1214 ENTER;
1215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1216
1217 ioa_cfg->errors_logged++;
1218 dev_err(&ioa_cfg->pdev->dev,
1219 "Adapter being reset due to command timeout.\n");
1220
1221 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1222 ioa_cfg->sdt_state = GET_DUMP;
1223
1224 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1225 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1226
1227 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1228 LEAVE;
1229}
1230
1231/**
1232 * ipr_oper_timeout - Adapter timed out transitioning to operational
1233 * @ipr_cmd: ipr command struct
1234 *
1235 * This function blocks host requests and initiates an
1236 * adapter reset.
1237 *
1238 * Return value:
1239 * none
1240 **/
1241static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1242{
1243 unsigned long lock_flags = 0;
1244 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1245
1246 ENTER;
1247 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1248
1249 ioa_cfg->errors_logged++;
1250 dev_err(&ioa_cfg->pdev->dev,
1251 "Adapter timed out transitioning to operational.\n");
1252
1253 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1254 ioa_cfg->sdt_state = GET_DUMP;
1255
1256 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1257 if (ipr_fastfail)
1258 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1259 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1260 }
1261
1262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1263 LEAVE;
1264}
1265
1266/**
1267 * ipr_reset_reload - Reset/Reload the IOA
1268 * @ioa_cfg: ioa config struct
1269 * @shutdown_type: shutdown type
1270 *
1271 * This function resets the adapter and re-initializes it.
1272 * This function assumes that all new host commands have been stopped.
1273 * Return value:
1274 * SUCCESS / FAILED
1275 **/
1276static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1277 enum ipr_shutdown_type shutdown_type)
1278{
1279 if (!ioa_cfg->in_reset_reload)
1280 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1281
1282 spin_unlock_irq(ioa_cfg->host->host_lock);
1283 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1284 spin_lock_irq(ioa_cfg->host->host_lock);
1285
 1286 /* If we got hit with a host reset while we were already resetting
 1287 the adapter for some reason, and that reset failed, the adapter is now dead. */
1288 if (ioa_cfg->ioa_is_dead) {
1289 ipr_trace;
1290 return FAILED;
1291 }
1292
1293 return SUCCESS;
1294}
1295
1296/**
1297 * ipr_find_ses_entry - Find matching SES in SES table
1298 * @res: resource entry struct of SES
1299 *
1300 * Return value:
1301 * pointer to SES table entry / NULL on failure
1302 **/
1303static const struct ipr_ses_table_entry *
1304ipr_find_ses_entry(struct ipr_resource_entry *res)
1305{
1306 int i, j, matches;
1307 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1308
1309 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1310 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1311 if (ste->compare_product_id_byte[j] == 'X') {
1312 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1313 matches++;
1314 else
1315 break;
1316 } else
1317 matches++;
1318 }
1319
1320 if (matches == IPR_PROD_ID_LEN)
1321 return ste;
1322 }
1323
1324 return NULL;
1325}
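/*
 * Note added for clarity: in the comparison above an 'X' in
 * compare_product_id_byte means the corresponding product ID byte must match
 * the table entry, while any other character (e.g. the '*' used by several
 * ipr_ses_table entries) marks the byte as "don't care". A compare string of
 * "XXXXXXX*XXXXXXXX" therefore matches any product ID that agrees with the
 * entry in every byte except byte 7.
 */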
1326
1327/**
1328 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1329 * @ioa_cfg: ioa config struct
1330 * @bus: SCSI bus
1331 * @bus_width: bus width
1332 *
1333 * Return value:
1334 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1335 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1336 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1337 * max 160MHz = max 320MB/sec).
1338 **/
1339static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1340{
1341 struct ipr_resource_entry *res;
1342 const struct ipr_ses_table_entry *ste;
1343 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1344
1345 /* Loop through each config table entry in the config table buffer */
1346 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1347 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1348 continue;
1349
1350 if (bus != res->cfgte.res_addr.bus)
1351 continue;
1352
1353 if (!(ste = ipr_find_ses_entry(res)))
1354 continue;
1355
1356 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1357 }
1358
1359 return max_xfer_rate;
1360}
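/*
 * Worked example (illustrative; assumes the ipr_ses_table limits above are in
 * MB/sec): for a 16-bit (2-byte) wide bus capped at 160 MB/sec,
 * (160 * 10) / (16 / 8) = 800, i.e. an 80 MHz transfer clock, which on a wide
 * bus moves 160 MB/sec, matching the "units of 100KHz" convention described
 * in the function header.
 */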
1361
1362/**
1363 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1364 * @ioa_cfg: ioa config struct
1365 * @max_delay: max delay in micro-seconds to wait
1366 *
1367 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1368 *
1369 * Return value:
1370 * 0 on success / other on failure
1371 **/
1372static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1373{
1374 volatile u32 pcii_reg;
1375 int delay = 1;
1376
1377 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1378 while (delay < max_delay) {
1379 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1380
1381 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1382 return 0;
1383
1384 /* udelay cannot be used if delay is more than a few milliseconds */
1385 if ((delay / 1000) > MAX_UDELAY_MS)
1386 mdelay(delay / 1000);
1387 else
1388 udelay(delay);
1389
1390 delay += delay;
1391 }
1392 return -EIO;
1393}
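/* Note added for clarity: the polling delay doubles on every pass (1, 2, 4, ...
   microseconds), so the total busy-wait is bounded by roughly twice max_delay */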
1394
1395/**
1396 * ipr_get_ldump_data_section - Dump IOA memory
1397 * @ioa_cfg: ioa config struct
1398 * @start_addr: adapter address to dump
1399 * @dest: destination kernel buffer
1400 * @length_in_words: length to dump in 4 byte words
1401 *
1402 * Return value:
1403 * 0 on success / -EIO on failure
1404 **/
1405static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1406 u32 start_addr,
1407 __be32 *dest, u32 length_in_words)
1408{
1409 volatile u32 temp_pcii_reg;
1410 int i, delay = 0;
1411
1412 /* Write IOA interrupt reg starting LDUMP state */
1413 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1414 ioa_cfg->regs.set_uproc_interrupt_reg);
1415
1416 /* Wait for IO debug acknowledge */
1417 if (ipr_wait_iodbg_ack(ioa_cfg,
1418 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1419 dev_err(&ioa_cfg->pdev->dev,
1420 "IOA dump long data transfer timeout\n");
1421 return -EIO;
1422 }
1423
1424 /* Signal LDUMP interlocked - clear IO debug ack */
1425 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1426 ioa_cfg->regs.clr_interrupt_reg);
1427
1428 /* Write Mailbox with starting address */
1429 writel(start_addr, ioa_cfg->ioa_mailbox);
1430
1431 /* Signal address valid - clear IOA Reset alert */
1432 writel(IPR_UPROCI_RESET_ALERT,
1433 ioa_cfg->regs.clr_uproc_interrupt_reg);
1434
1435 for (i = 0; i < length_in_words; i++) {
1436 /* Wait for IO debug acknowledge */
1437 if (ipr_wait_iodbg_ack(ioa_cfg,
1438 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1439 dev_err(&ioa_cfg->pdev->dev,
1440 "IOA dump short data transfer timeout\n");
1441 return -EIO;
1442 }
1443
1444 /* Read data from mailbox and increment destination pointer */
1445 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1446 dest++;
1447
1448 /* For all but the last word of data, signal data received */
1449 if (i < (length_in_words - 1)) {
1450 /* Signal dump data received - Clear IO debug Ack */
1451 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1452 ioa_cfg->regs.clr_interrupt_reg);
1453 }
1454 }
1455
1456 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1457 writel(IPR_UPROCI_RESET_ALERT,
1458 ioa_cfg->regs.set_uproc_interrupt_reg);
1459
1460 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1461 ioa_cfg->regs.clr_uproc_interrupt_reg);
1462
1463 /* Signal dump data received - Clear IO debug Ack */
1464 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1465 ioa_cfg->regs.clr_interrupt_reg);
1466
1467 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1468 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1469 temp_pcii_reg =
1470 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1471
1472 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1473 return 0;
1474
1475 udelay(10);
1476 delay += 10;
1477 }
1478
1479 return 0;
1480}
1481
1482#ifdef CONFIG_SCSI_IPR_DUMP
1483/**
1484 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1485 * @ioa_cfg: ioa config struct
1486 * @pci_address: adapter address
1487 * @length: length of data to copy
1488 *
1489 * Copy data from PCI adapter to kernel buffer.
1490 * Note: length MUST be a 4 byte multiple
1491 * Return value:
1492 * 0 on success / other on failure
1493 **/
1494static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1495 unsigned long pci_address, u32 length)
1496{
1497 int bytes_copied = 0;
1498 int cur_len, rc, rem_len, rem_page_len;
1499 __be32 *page;
1500 unsigned long lock_flags = 0;
1501 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1502
1503 while (bytes_copied < length &&
1504 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1505 if (ioa_dump->page_offset >= PAGE_SIZE ||
1506 ioa_dump->page_offset == 0) {
1507 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1508
1509 if (!page) {
1510 ipr_trace;
1511 return bytes_copied;
1512 }
1513
1514 ioa_dump->page_offset = 0;
1515 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1516 ioa_dump->next_page_index++;
1517 } else
1518 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1519
1520 rem_len = length - bytes_copied;
1521 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1522 cur_len = min(rem_len, rem_page_len);
1523
1524 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1525 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1526 rc = -EIO;
1527 } else {
1528 rc = ipr_get_ldump_data_section(ioa_cfg,
1529 pci_address + bytes_copied,
1530 &page[ioa_dump->page_offset / 4],
1531 (cur_len / sizeof(u32)));
1532 }
1533 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1534
1535 if (!rc) {
1536 ioa_dump->page_offset += cur_len;
1537 bytes_copied += cur_len;
1538 } else {
1539 ipr_trace;
1540 break;
1541 }
1542 schedule();
1543 }
1544
1545 return bytes_copied;
1546}
1547
1548/**
1549 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1550 * @hdr: dump entry header struct
1551 *
1552 * Return value:
1553 * nothing
1554 **/
1555static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1556{
1557 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1558 hdr->num_elems = 1;
1559 hdr->offset = sizeof(*hdr);
1560 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1561}
1562
1563/**
1564 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1565 * @ioa_cfg: ioa config struct
1566 * @driver_dump: driver dump struct
1567 *
1568 * Return value:
1569 * nothing
1570 **/
1571static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1572 struct ipr_driver_dump *driver_dump)
1573{
1574 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1575
1576 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1577 driver_dump->ioa_type_entry.hdr.len =
1578 sizeof(struct ipr_dump_ioa_type_entry) -
1579 sizeof(struct ipr_dump_entry_header);
1580 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1581 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1582 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1583 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1584 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1585 ucode_vpd->minor_release[1];
1586 driver_dump->hdr.num_entries++;
1587}
1588
1589/**
1590 * ipr_dump_version_data - Fill in the driver version in the dump.
1591 * @ioa_cfg: ioa config struct
1592 * @driver_dump: driver dump struct
1593 *
1594 * Return value:
1595 * nothing
1596 **/
1597static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1598 struct ipr_driver_dump *driver_dump)
1599{
1600 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1601 driver_dump->version_entry.hdr.len =
1602 sizeof(struct ipr_dump_version_entry) -
1603 sizeof(struct ipr_dump_entry_header);
1604 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1605 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1606 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1607 driver_dump->hdr.num_entries++;
1608}
1609
1610/**
1611 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1612 * @ioa_cfg: ioa config struct
1613 * @driver_dump: driver dump struct
1614 *
1615 * Return value:
1616 * nothing
1617 **/
1618static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1619 struct ipr_driver_dump *driver_dump)
1620{
1621 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1622 driver_dump->trace_entry.hdr.len =
1623 sizeof(struct ipr_dump_trace_entry) -
1624 sizeof(struct ipr_dump_entry_header);
1625 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1626 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1627 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1628 driver_dump->hdr.num_entries++;
1629}
1630
1631/**
1632 * ipr_dump_location_data - Fill in the IOA location in the dump.
1633 * @ioa_cfg: ioa config struct
1634 * @driver_dump: driver dump struct
1635 *
1636 * Return value:
1637 * nothing
1638 **/
1639static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1640 struct ipr_driver_dump *driver_dump)
1641{
1642 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1643 driver_dump->location_entry.hdr.len =
1644 sizeof(struct ipr_dump_location_entry) -
1645 sizeof(struct ipr_dump_entry_header);
1646 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1647 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1648 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1649 driver_dump->hdr.num_entries++;
1650}
1651
1652/**
1653 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1654 * @ioa_cfg: ioa config struct
1655 * @dump: dump struct
1656 *
1657 * Return value:
1658 * nothing
1659 **/
1660static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1661{
1662 unsigned long start_addr, sdt_word;
1663 unsigned long lock_flags = 0;
1664 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1665 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1666 u32 num_entries, start_off, end_off;
1667 u32 bytes_to_copy, bytes_copied, rc;
1668 struct ipr_sdt *sdt;
1669 int i;
1670
1671 ENTER;
1672
1673 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1674
1675 if (ioa_cfg->sdt_state != GET_DUMP) {
1676 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1677 return;
1678 }
1679
1680 start_addr = readl(ioa_cfg->ioa_mailbox);
1681
1682 if (!ipr_sdt_is_fmt2(start_addr)) {
1683 dev_err(&ioa_cfg->pdev->dev,
1684 "Invalid dump table format: %lx\n", start_addr);
1685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1686 return;
1687 }
1688
1689 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1690
1691 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1692
1693 /* Initialize the overall dump header */
1694 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1695 driver_dump->hdr.num_entries = 1;
1696 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1697 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1698 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1699 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1700
1701 ipr_dump_version_data(ioa_cfg, driver_dump);
1702 ipr_dump_location_data(ioa_cfg, driver_dump);
1703 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1704 ipr_dump_trace_data(ioa_cfg, driver_dump);
1705
1706 /* Update dump_header */
1707 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1708
1709 /* IOA Dump entry */
1710 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1711 ioa_dump->format = IPR_SDT_FMT2;
1712 ioa_dump->hdr.len = 0;
1713 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1714 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1715
1716 /* First entries in sdt are actually a list of dump addresses and
1717 lengths to gather the real dump data. sdt represents the pointer
1718 to the ioa generated dump table. Dump data will be extracted based
1719 on entries in this table */
1720 sdt = &ioa_dump->sdt;
1721
1722 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1723 sizeof(struct ipr_sdt) / sizeof(__be32));
1724
1725 /* Smart Dump table is ready to use and the first entry is valid */
1726 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1727 dev_err(&ioa_cfg->pdev->dev,
1728 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1729 rc, be32_to_cpu(sdt->hdr.state));
1730 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1731 ioa_cfg->sdt_state = DUMP_OBTAINED;
1732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1733 return;
1734 }
1735
1736 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1737
1738 if (num_entries > IPR_NUM_SDT_ENTRIES)
1739 num_entries = IPR_NUM_SDT_ENTRIES;
1740
1741 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1742
1743 for (i = 0; i < num_entries; i++) {
1744 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1745 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1746 break;
1747 }
1748
1749 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1750 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1751 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1752 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1753
1754 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1755 bytes_to_copy = end_off - start_off;
1756 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1757 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1758 continue;
1759 }
1760
1761 /* Copy data from adapter to driver buffers */
1762 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1763 bytes_to_copy);
1764
1765 ioa_dump->hdr.len += bytes_copied;
1766
1767 if (bytes_copied != bytes_to_copy) {
1768 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1769 break;
1770 }
1771 }
1772 }
1773 }
1774
1775 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1776
1777 /* Update dump_header */
1778 driver_dump->hdr.len += ioa_dump->hdr.len;
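	/* Order the dump header length update above ahead of the sdt_state
	 * change below, so the dump is not advertised as obtained with a
	 * stale header length. */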
1779 wmb();
1780 ioa_cfg->sdt_state = DUMP_OBTAINED;
1781 LEAVE;
1782}
1783
1784#else
1785#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1786#endif
1787
1788/**
1789 * ipr_release_dump - Free adapter dump memory
1790 * @kref: kref struct
1791 *
1792 * Return value:
1793 * nothing
1794 **/
1795static void ipr_release_dump(struct kref *kref)
1796{
1797 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1798 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1799 unsigned long lock_flags = 0;
1800 int i;
1801
1802 ENTER;
1803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1804 ioa_cfg->dump = NULL;
1805 ioa_cfg->sdt_state = INACTIVE;
1806 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1807
1808 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1809 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1810
1811 kfree(dump);
1812 LEAVE;
1813}
1814
1815/**
1816 * ipr_worker_thread - Worker thread
1817 * @data: ioa config struct
1818 *
1819 * Called at task level from a work thread. This function takes care
 1820 * of adding and removing devices from the mid-layer as configuration
1821 * changes are detected by the adapter.
1822 *
1823 * Return value:
1824 * nothing
1825 **/
1826static void ipr_worker_thread(void *data)
1827{
1828 unsigned long lock_flags;
1829 struct ipr_resource_entry *res;
1830 struct scsi_device *sdev;
1831 struct ipr_dump *dump;
1832 struct ipr_ioa_cfg *ioa_cfg = data;
1833 u8 bus, target, lun;
1834 int did_work;
1835
1836 ENTER;
1837 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1838
1839 if (ioa_cfg->sdt_state == GET_DUMP) {
1840 dump = ioa_cfg->dump;
1841 if (!dump) {
1842 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1843 return;
1844 }
1845 kref_get(&dump->kref);
1846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1847 ipr_get_ioa_dump(ioa_cfg, dump);
1848 kref_put(&dump->kref, ipr_release_dump);
1849
1850 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1851 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1852 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1854 return;
1855 }
1856
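	/* Device add/remove below must drop the host lock around
	 * scsi_remove_device()/scsi_add_device(), so the used resource queue
	 * is re-walked from the top (the did_work loop and the restart label)
	 * every time the lock is released instead of trusting a list cursor
	 * across the unlock. */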
1857restart:
1858 do {
1859 did_work = 0;
1860 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1861 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1862 return;
1863 }
1864
1865 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1866 if (res->del_from_ml && res->sdev) {
1867 did_work = 1;
1868 sdev = res->sdev;
1869 if (!scsi_device_get(sdev)) {
1870 res->sdev = NULL;
1871 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1872 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1873 scsi_remove_device(sdev);
1874 scsi_device_put(sdev);
1875 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1876 }
1877 break;
1878 }
1879 }
1880 } while(did_work);
1881
1882 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1883 if (res->add_to_ml) {
1884 bus = res->cfgte.res_addr.bus;
1885 target = res->cfgte.res_addr.target;
1886 lun = res->cfgte.res_addr.lun;
1887 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1888 scsi_add_device(ioa_cfg->host, bus, target, lun);
1889 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1890 goto restart;
1891 }
1892 }
1893
1894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1895 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1896 LEAVE;
1897}
1898
1899#ifdef CONFIG_SCSI_IPR_TRACE
1900/**
1901 * ipr_read_trace - Dump the adapter trace
1902 * @kobj: kobject struct
1903 * @buf: buffer
1904 * @off: offset
1905 * @count: buffer size
1906 *
1907 * Return value:
1908 * number of bytes printed to buffer
1909 **/
1910static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1911 loff_t off, size_t count)
1912{
1913 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1914 struct Scsi_Host *shost = class_to_shost(cdev);
1915 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1916 unsigned long lock_flags = 0;
1917 int size = IPR_TRACE_SIZE;
1918 char *src = (char *)ioa_cfg->trace;
1919
1920 if (off > size)
1921 return 0;
1922 if (off + count > size) {
1923 size -= off;
1924 count = size;
1925 }
1926
1927 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1928 memcpy(buf, &src[off], count);
1929 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1930 return count;
1931}
1932
1933static struct bin_attribute ipr_trace_attr = {
1934 .attr = {
1935 .name = "trace",
1936 .mode = S_IRUGO,
1937 },
1938 .size = 0,
1939 .read = ipr_read_trace,
1940};
1941#endif
1942
62275040
BK
1943static const struct {
1944 enum ipr_cache_state state;
1945 char *name;
1946} cache_state [] = {
1947 { CACHE_NONE, "none" },
1948 { CACHE_DISABLED, "disabled" },
1949 { CACHE_ENABLED, "enabled" }
1950};
1951
1952/**
1953 * ipr_show_write_caching - Show the write caching attribute
1954 * @class_dev: class device struct
1955 * @buf: buffer
1956 *
1957 * Return value:
1958 * number of bytes printed to buffer
1959 **/
1960static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
1961{
1962 struct Scsi_Host *shost = class_to_shost(class_dev);
1963 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1964 unsigned long lock_flags = 0;
1965 int i, len = 0;
1966
1967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1968 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
1969 if (cache_state[i].state == ioa_cfg->cache_state) {
1970 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
1971 break;
1972 }
1973 }
1974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1975 return len;
1976}
1977
1978
1979/**
1980 * ipr_store_write_caching - Enable/disable adapter write cache
1981 * @class_dev: class_device struct
1982 * @buf: buffer
1983 * @count: buffer size
1984 *
1985 * This function will enable/disable adapter write cache.
1986 *
1987 * Return value:
1988 * count on success / other on failure
1989 **/
1990static ssize_t ipr_store_write_caching(struct class_device *class_dev,
1991 const char *buf, size_t count)
1992{
1993 struct Scsi_Host *shost = class_to_shost(class_dev);
1994 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1995 unsigned long lock_flags = 0;
1996 enum ipr_cache_state new_state = CACHE_INVALID;
1997 int i;
1998
1999 if (!capable(CAP_SYS_ADMIN))
2000 return -EACCES;
2001 if (ioa_cfg->cache_state == CACHE_NONE)
2002 return -EINVAL;
2003
2004 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2005 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2006 new_state = cache_state[i].state;
2007 break;
2008 }
2009 }
2010
2011 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2012 return -EINVAL;
2013
2014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2015 if (ioa_cfg->cache_state == new_state) {
2016 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2017 return count;
2018 }
2019
2020 ioa_cfg->cache_state = new_state;
2021 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2022 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2023 if (!ioa_cfg->in_reset_reload)
2024 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2026 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2027
2028 return count;
2029}
2030
2031static struct class_device_attribute ipr_ioa_cache_attr = {
2032 .attr = {
2033 .name = "write_cache",
2034 .mode = S_IRUGO | S_IWUSR,
2035 },
2036 .show = ipr_show_write_caching,
2037 .store = ipr_store_write_caching
2038};
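/*
 * The attribute above is exposed as a "write_cache" file under the host's
 * scsi_host class device. Illustrative usage, assuming the adapter is host0:
 *
 *	echo disabled > /sys/class/scsi_host/host0/write_cache
 *	cat /sys/class/scsi_host/host0/write_cache
 */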
2039
1da177e4
LT
2040/**
2041 * ipr_show_fw_version - Show the firmware version
2042 * @class_dev: class device struct
2043 * @buf: buffer
2044 *
2045 * Return value:
2046 * number of bytes printed to buffer
2047 **/
2048static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2049{
2050 struct Scsi_Host *shost = class_to_shost(class_dev);
2051 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2052 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2053 unsigned long lock_flags = 0;
2054 int len;
2055
2056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2057 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2058 ucode_vpd->major_release, ucode_vpd->card_type,
2059 ucode_vpd->minor_release[0],
2060 ucode_vpd->minor_release[1]);
2061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2062 return len;
2063}
2064
2065static struct class_device_attribute ipr_fw_version_attr = {
2066 .attr = {
2067 .name = "fw_version",
2068 .mode = S_IRUGO,
2069 },
2070 .show = ipr_show_fw_version,
2071};
2072
2073/**
2074 * ipr_show_log_level - Show the adapter's error logging level
2075 * @class_dev: class device struct
2076 * @buf: buffer
2077 *
2078 * Return value:
2079 * number of bytes printed to buffer
2080 **/
2081static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2082{
2083 struct Scsi_Host *shost = class_to_shost(class_dev);
2084 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2085 unsigned long lock_flags = 0;
2086 int len;
2087
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2090 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2091 return len;
2092}
2093
2094/**
2095 * ipr_store_log_level - Change the adapter's error logging level
2096 * @class_dev: class device struct
2097 * @buf: buffer
2098 *
2099 * Return value:
 2100 * length of the input buffer
2101 **/
2102static ssize_t ipr_store_log_level(struct class_device *class_dev,
2103 const char *buf, size_t count)
2104{
2105 struct Scsi_Host *shost = class_to_shost(class_dev);
2106 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2107 unsigned long lock_flags = 0;
2108
2109 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2110 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2112 return strlen(buf);
2113}
2114
2115static struct class_device_attribute ipr_log_level_attr = {
2116 .attr = {
2117 .name = "log_level",
2118 .mode = S_IRUGO | S_IWUSR,
2119 },
2120 .show = ipr_show_log_level,
2121 .store = ipr_store_log_level
2122};
2123
2124/**
2125 * ipr_store_diagnostics - IOA Diagnostics interface
2126 * @class_dev: class_device struct
2127 * @buf: buffer
2128 * @count: buffer size
2129 *
2130 * This function will reset the adapter and wait a reasonable
2131 * amount of time for any errors that the adapter might log.
2132 *
2133 * Return value:
2134 * count on success / other on failure
2135 **/
2136static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2137 const char *buf, size_t count)
2138{
2139 struct Scsi_Host *shost = class_to_shost(class_dev);
2140 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2141 unsigned long lock_flags = 0;
2142 int rc = count;
2143
2144 if (!capable(CAP_SYS_ADMIN))
2145 return -EACCES;
2146
2147 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2148 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2149 ioa_cfg->errors_logged = 0;
2150 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2151
2152 if (ioa_cfg->in_reset_reload) {
2153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2154 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2155
2156 /* Wait for a second for any errors to be logged */
2157 msleep(1000);
2158 } else {
2159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2160 return -EIO;
2161 }
2162
2163 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2164 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2165 rc = -EIO;
2166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2167
2168 return rc;
2169}
2170
2171static struct class_device_attribute ipr_diagnostics_attr = {
2172 .attr = {
2173 .name = "run_diagnostics",
2174 .mode = S_IWUSR,
2175 },
2176 .store = ipr_store_diagnostics
2177};
2178
2179/**
2180 * ipr_store_reset_adapter - Reset the adapter
2181 * @class_dev: class_device struct
2182 * @buf: buffer
2183 * @count: buffer size
2184 *
2185 * This function will reset the adapter.
2186 *
2187 * Return value:
2188 * count on success / other on failure
2189 **/
2190static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2191 const char *buf, size_t count)
2192{
2193 struct Scsi_Host *shost = class_to_shost(class_dev);
2194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2195 unsigned long lock_flags;
2196 int result = count;
2197
2198 if (!capable(CAP_SYS_ADMIN))
2199 return -EACCES;
2200
2201 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2202 if (!ioa_cfg->in_reset_reload)
2203 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2204 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2205 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2206
2207 return result;
2208}
2209
2210static struct class_device_attribute ipr_ioa_reset_attr = {
2211 .attr = {
2212 .name = "reset_host",
2213 .mode = S_IWUSR,
2214 },
2215 .store = ipr_store_reset_adapter
2216};
2217
2218/**
2219 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2220 * @buf_len: buffer length
2221 *
2222 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2223 * list to use for microcode download
2224 *
2225 * Return value:
2226 * pointer to sglist / NULL on failure
2227 **/
2228static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2229{
2230 int sg_size, order, bsize_elem, num_elem, i, j;
2231 struct ipr_sglist *sglist;
2232 struct scatterlist *scatterlist;
2233 struct page *page;
2234
2235 /* Get the minimum size per scatter/gather element */
2236 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2237
2238 /* Get the actual size per element */
2239 order = get_order(sg_size);
2240
2241 /* Determine the actual number of bytes per element */
2242 bsize_elem = PAGE_SIZE * (1 << order);
2243
2244 /* Determine the actual number of sg entries needed */
2245 if (buf_len % bsize_elem)
2246 num_elem = (buf_len / bsize_elem) + 1;
2247 else
2248 num_elem = buf_len / bsize_elem;
2249
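	/* Worked example of the sizing above (illustrative values, assuming
	 * IPR_MAX_SGLIST == 64 and 4K pages): a 600KB image gives an sg_size
	 * of roughly 9.5KB, get_order() rounds that up to order 2 (16KB), so
	 * bsize_elem is 16KB and num_elem works out to 38 elements. */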
2250 /* Allocate a scatter/gather list for the DMA */
0bc42e35 2251 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
2252 (sizeof(struct scatterlist) * (num_elem - 1)),
2253 GFP_KERNEL);
2254
2255 if (sglist == NULL) {
2256 ipr_trace;
2257 return NULL;
2258 }
2259
1da177e4
LT
2260 scatterlist = sglist->scatterlist;
2261
2262 sglist->order = order;
2263 sglist->num_sg = num_elem;
2264
2265 /* Allocate a bunch of sg elements */
2266 for (i = 0; i < num_elem; i++) {
2267 page = alloc_pages(GFP_KERNEL, order);
2268 if (!page) {
2269 ipr_trace;
2270
2271 /* Free up what we already allocated */
2272 for (j = i - 1; j >= 0; j--)
2273 __free_pages(scatterlist[j].page, order);
2274 kfree(sglist);
2275 return NULL;
2276 }
2277
2278 scatterlist[i].page = page;
2279 }
2280
2281 return sglist;
2282}
2283
2284/**
2285 * ipr_free_ucode_buffer - Frees a microcode download buffer
 2286 * @sglist: scatter/gather list pointer
2287 *
2288 * Free a DMA'able ucode download buffer previously allocated with
2289 * ipr_alloc_ucode_buffer
2290 *
2291 * Return value:
2292 * nothing
2293 **/
2294static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2295{
2296 int i;
2297
2298 for (i = 0; i < sglist->num_sg; i++)
2299 __free_pages(sglist->scatterlist[i].page, sglist->order);
2300
2301 kfree(sglist);
2302}
2303
2304/**
2305 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2306 * @sglist: scatter/gather list pointer
2307 * @buffer: buffer pointer
2308 * @len: buffer length
2309 *
2310 * Copy a microcode image from a user buffer into a buffer allocated by
2311 * ipr_alloc_ucode_buffer
2312 *
2313 * Return value:
2314 * 0 on success / other on failure
2315 **/
2316static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2317 u8 *buffer, u32 len)
2318{
2319 int bsize_elem, i, result = 0;
2320 struct scatterlist *scatterlist;
2321 void *kaddr;
2322
2323 /* Determine the actual number of bytes per element */
2324 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2325
2326 scatterlist = sglist->scatterlist;
2327
2328 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2329 kaddr = kmap(scatterlist[i].page);
2330 memcpy(kaddr, buffer, bsize_elem);
2331 kunmap(scatterlist[i].page);
2332
2333 scatterlist[i].length = bsize_elem;
2334
2335 if (result != 0) {
2336 ipr_trace;
2337 return result;
2338 }
2339 }
2340
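	/* Copy whatever remains after the full-sized chunks above into one
	 * final, shorter scatter/gather element. */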
2341 if (len % bsize_elem) {
2342 kaddr = kmap(scatterlist[i].page);
2343 memcpy(kaddr, buffer, len % bsize_elem);
2344 kunmap(scatterlist[i].page);
2345
2346 scatterlist[i].length = len % bsize_elem;
2347 }
2348
2349 sglist->buffer_len = len;
2350 return result;
2351}
2352
2353/**
12baa420 2354 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
2355 * @ipr_cmd: ipr command struct
2356 * @sglist: scatter/gather list
1da177e4 2357 *
12baa420 2358 * Builds a microcode download IOA data list (IOADL).
1da177e4 2359 *
1da177e4 2360 **/
12baa420
BK
2361static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2362 struct ipr_sglist *sglist)
1da177e4 2363{
1da177e4
LT
2364 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2365 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2366 struct scatterlist *scatterlist = sglist->scatterlist;
2367 int i;
2368
12baa420 2369 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 2370 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
12baa420 2371 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
1da177e4
LT
2372 ioarcb->write_ioadl_len =
2373 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2374
2375 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2376 ioadl[i].flags_and_data_len =
2377 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2378 ioadl[i].address =
2379 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2380 }
2381
12baa420
BK
2382 ioadl[i-1].flags_and_data_len |=
2383 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2384}
2385
2386/**
2387 * ipr_update_ioa_ucode - Update IOA's microcode
2388 * @ioa_cfg: ioa config struct
2389 * @sglist: scatter/gather list
2390 *
2391 * Initiate an adapter reset to update the IOA's microcode
2392 *
2393 * Return value:
2394 * 0 on success / -EIO on failure
2395 **/
2396static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2397 struct ipr_sglist *sglist)
2398{
2399 unsigned long lock_flags;
2400
2401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2402
2403 if (ioa_cfg->ucode_sglist) {
2404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2405 dev_err(&ioa_cfg->pdev->dev,
2406 "Microcode download already in progress\n");
2407 return -EIO;
1da177e4 2408 }
12baa420
BK
2409
2410 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2411 sglist->num_sg, DMA_TO_DEVICE);
2412
2413 if (!sglist->num_dma_sg) {
2414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2415 dev_err(&ioa_cfg->pdev->dev,
2416 "Failed to map microcode download buffer!\n");
1da177e4
LT
2417 return -EIO;
2418 }
2419
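	/* The mapped scatter/gather list is published through
	 * ioa_cfg->ucode_sglist; the actual download is driven by the adapter
	 * reset started below, and this thread simply waits for the
	 * reset/reload to finish before clearing the pointer again. */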
12baa420
BK
2420 ioa_cfg->ucode_sglist = sglist;
2421 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2422 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2423 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2424
2425 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2426 ioa_cfg->ucode_sglist = NULL;
2427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
2428 return 0;
2429}
2430
2431/**
2432 * ipr_store_update_fw - Update the firmware on the adapter
2433 * @class_dev: class_device struct
2434 * @buf: buffer
2435 * @count: buffer size
2436 *
2437 * This function will update the firmware on the adapter.
2438 *
2439 * Return value:
2440 * count on success / other on failure
2441 **/
2442static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2443 const char *buf, size_t count)
2444{
2445 struct Scsi_Host *shost = class_to_shost(class_dev);
2446 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2447 struct ipr_ucode_image_header *image_hdr;
2448 const struct firmware *fw_entry;
2449 struct ipr_sglist *sglist;
1da177e4
LT
2450 char fname[100];
2451 char *src;
2452 int len, result, dnld_size;
2453
2454 if (!capable(CAP_SYS_ADMIN))
2455 return -EACCES;
2456
2457 len = snprintf(fname, 99, "%s", buf);
2458 fname[len-1] = '\0';
2459
 2460 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2461 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2462 return -EIO;
2463 }
2464
2465 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2466
2467 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2468 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2469 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2470 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2471 release_firmware(fw_entry);
2472 return -EINVAL;
2473 }
2474
2475 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2476 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2477 sglist = ipr_alloc_ucode_buffer(dnld_size);
2478
2479 if (!sglist) {
2480 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2481 release_firmware(fw_entry);
2482 return -ENOMEM;
2483 }
2484
2485 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2486
2487 if (result) {
2488 dev_err(&ioa_cfg->pdev->dev,
2489 "Microcode buffer copy to DMA buffer failed\n");
12baa420 2490 goto out;
1da177e4
LT
2491 }
2492
12baa420 2493 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 2494
12baa420
BK
2495 if (!result)
2496 result = count;
2497out:
1da177e4
LT
2498 ipr_free_ucode_buffer(sglist);
2499 release_firmware(fw_entry);
12baa420 2500 return result;
1da177e4
LT
2501}
2502
2503static struct class_device_attribute ipr_update_fw_attr = {
2504 .attr = {
2505 .name = "update_fw",
2506 .mode = S_IWUSR,
2507 },
2508 .store = ipr_store_update_fw
2509};
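/*
 * A microcode update is triggered by writing an image name to the
 * "update_fw" attribute; the image is fetched with request_firmware(), so
 * it must be visible to the firmware loader. Illustrative usage, assuming
 * host0 and an image file named ibm-ucode.bin on the firmware search path:
 *
 *	echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */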
2510
2511static struct class_device_attribute *ipr_ioa_attrs[] = {
2512 &ipr_fw_version_attr,
2513 &ipr_log_level_attr,
2514 &ipr_diagnostics_attr,
2515 &ipr_ioa_reset_attr,
2516 &ipr_update_fw_attr,
62275040 2517 &ipr_ioa_cache_attr,
1da177e4
LT
2518 NULL,
2519};
2520
2521#ifdef CONFIG_SCSI_IPR_DUMP
2522/**
2523 * ipr_read_dump - Dump the adapter
2524 * @kobj: kobject struct
2525 * @buf: buffer
2526 * @off: offset
2527 * @count: buffer size
2528 *
2529 * Return value:
2530 * number of bytes printed to buffer
2531 **/
2532static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2533 loff_t off, size_t count)
2534{
2535 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2536 struct Scsi_Host *shost = class_to_shost(cdev);
2537 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2538 struct ipr_dump *dump;
2539 unsigned long lock_flags = 0;
2540 char *src;
2541 int len;
2542 size_t rc = count;
2543
2544 if (!capable(CAP_SYS_ADMIN))
2545 return -EACCES;
2546
2547 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2548 dump = ioa_cfg->dump;
2549
2550 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2552 return 0;
2553 }
2554 kref_get(&dump->kref);
2555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556
2557 if (off > dump->driver_dump.hdr.len) {
2558 kref_put(&dump->kref, ipr_release_dump);
2559 return 0;
2560 }
2561
2562 if (off + count > dump->driver_dump.hdr.len) {
2563 count = dump->driver_dump.hdr.len - off;
2564 rc = count;
2565 }
2566
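	/* The dump is presented to user space as three consecutive regions:
	 * the driver dump structures, the IOA dump entry header, and finally
	 * the raw IOA data held in the ioa_data[] page array. Each block
	 * below copies from one region and slides 'off' into the next. */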
2567 if (count && off < sizeof(dump->driver_dump)) {
2568 if (off + count > sizeof(dump->driver_dump))
2569 len = sizeof(dump->driver_dump) - off;
2570 else
2571 len = count;
2572 src = (u8 *)&dump->driver_dump + off;
2573 memcpy(buf, src, len);
2574 buf += len;
2575 off += len;
2576 count -= len;
2577 }
2578
2579 off -= sizeof(dump->driver_dump);
2580
2581 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2582 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2583 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2584 else
2585 len = count;
2586 src = (u8 *)&dump->ioa_dump + off;
2587 memcpy(buf, src, len);
2588 buf += len;
2589 off += len;
2590 count -= len;
2591 }
2592
2593 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2594
2595 while (count) {
2596 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2597 len = PAGE_ALIGN(off) - off;
2598 else
2599 len = count;
2600 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2601 src += off & ~PAGE_MASK;
2602 memcpy(buf, src, len);
2603 buf += len;
2604 off += len;
2605 count -= len;
2606 }
2607
2608 kref_put(&dump->kref, ipr_release_dump);
2609 return rc;
2610}
2611
2612/**
2613 * ipr_alloc_dump - Prepare for adapter dump
2614 * @ioa_cfg: ioa config struct
2615 *
2616 * Return value:
2617 * 0 on success / other on failure
2618 **/
2619static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2620{
2621 struct ipr_dump *dump;
2622 unsigned long lock_flags = 0;
2623
2624 ENTER;
0bc42e35 2625 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
2626
2627 if (!dump) {
2628 ipr_err("Dump memory allocation failed\n");
2629 return -ENOMEM;
2630 }
2631
1da177e4
LT
2632 kref_init(&dump->kref);
2633 dump->ioa_cfg = ioa_cfg;
2634
2635 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2636
2637 if (INACTIVE != ioa_cfg->sdt_state) {
2638 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2639 kfree(dump);
2640 return 0;
2641 }
2642
2643 ioa_cfg->dump = dump;
2644 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2645 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2646 ioa_cfg->dump_taken = 1;
2647 schedule_work(&ioa_cfg->work_q);
2648 }
2649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2650
2651 LEAVE;
2652 return 0;
2653}
2654
2655/**
2656 * ipr_free_dump - Free adapter dump memory
2657 * @ioa_cfg: ioa config struct
2658 *
2659 * Return value:
2660 * 0 on success / other on failure
2661 **/
2662static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2663{
2664 struct ipr_dump *dump;
2665 unsigned long lock_flags = 0;
2666
2667 ENTER;
2668
2669 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2670 dump = ioa_cfg->dump;
2671 if (!dump) {
2672 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2673 return 0;
2674 }
2675
2676 ioa_cfg->dump = NULL;
2677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2678
2679 kref_put(&dump->kref, ipr_release_dump);
2680
2681 LEAVE;
2682 return 0;
2683}
2684
2685/**
2686 * ipr_write_dump - Setup dump state of adapter
2687 * @kobj: kobject struct
2688 * @buf: buffer
2689 * @off: offset
2690 * @count: buffer size
2691 *
2692 * Return value:
 2693 * count on success / other on failure
2694 **/
2695static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2696 loff_t off, size_t count)
2697{
2698 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2699 struct Scsi_Host *shost = class_to_shost(cdev);
2700 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2701 int rc;
2702
2703 if (!capable(CAP_SYS_ADMIN))
2704 return -EACCES;
2705
2706 if (buf[0] == '1')
2707 rc = ipr_alloc_dump(ioa_cfg);
2708 else if (buf[0] == '0')
2709 rc = ipr_free_dump(ioa_cfg);
2710 else
2711 return -EINVAL;
2712
2713 if (rc)
2714 return rc;
2715 else
2716 return count;
2717}
2718
2719static struct bin_attribute ipr_dump_attr = {
2720 .attr = {
2721 .name = "dump",
2722 .mode = S_IRUSR | S_IWUSR,
2723 },
2724 .size = 0,
2725 .read = ipr_read_dump,
2726 .write = ipr_write_dump
2727};
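/*
 * Dump collection is driven through the "dump" attribute: writing '1'
 * allocates and arms a dump, reading returns the data once a dump has
 * actually been obtained, and writing '0' frees it. Illustrative usage,
 * assuming host0:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump
 *	cat /sys/class/scsi_host/host0/dump > ioa.dump
 *	echo 0 > /sys/class/scsi_host/host0/dump
 */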
2728#else
 2729static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
2730#endif
2731
2732/**
2733 * ipr_change_queue_depth - Change the device's queue depth
2734 * @sdev: scsi device struct
2735 * @qdepth: depth to set
2736 *
2737 * Return value:
2738 * actual depth set
2739 **/
2740static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2741{
2742 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2743 return sdev->queue_depth;
2744}
2745
2746/**
2747 * ipr_change_queue_type - Change the device's queue type
 2748 * @sdev: scsi device struct
2749 * @tag_type: type of tags to use
2750 *
2751 * Return value:
2752 * actual queue type set
2753 **/
2754static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2755{
2756 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2757 struct ipr_resource_entry *res;
2758 unsigned long lock_flags = 0;
2759
2760 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2761 res = (struct ipr_resource_entry *)sdev->hostdata;
2762
2763 if (res) {
2764 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2765 /*
2766 * We don't bother quiescing the device here since the
2767 * adapter firmware does it for us.
2768 */
2769 scsi_set_tag_type(sdev, tag_type);
2770
2771 if (tag_type)
2772 scsi_activate_tcq(sdev, sdev->queue_depth);
2773 else
2774 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2775 } else
2776 tag_type = 0;
2777 } else
2778 tag_type = 0;
2779
2780 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2781 return tag_type;
2782}
2783
2784/**
2785 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2786 * @dev: device struct
2787 * @buf: buffer
2788 *
2789 * Return value:
2790 * number of bytes printed to buffer
2791 **/
10523b3b 2792static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
2793{
2794 struct scsi_device *sdev = to_scsi_device(dev);
2795 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2796 struct ipr_resource_entry *res;
2797 unsigned long lock_flags = 0;
2798 ssize_t len = -ENXIO;
2799
2800 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2801 res = (struct ipr_resource_entry *)sdev->hostdata;
2802 if (res)
2803 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2804 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2805 return len;
2806}
2807
2808static struct device_attribute ipr_adapter_handle_attr = {
2809 .attr = {
2810 .name = "adapter_handle",
2811 .mode = S_IRUSR,
2812 },
2813 .show = ipr_show_adapter_handle
2814};
2815
2816static struct device_attribute *ipr_dev_attrs[] = {
2817 &ipr_adapter_handle_attr,
2818 NULL,
2819};
2820
2821/**
2822 * ipr_biosparam - Return the HSC mapping
2823 * @sdev: scsi device struct
2824 * @block_device: block device pointer
2825 * @capacity: capacity of the device
2826 * @parm: Array containing returned HSC values.
2827 *
2828 * This function generates the HSC parms that fdisk uses.
2829 * We want to make sure we return something that places partitions
2830 * on 4k boundaries for best performance with the IOA.
2831 *
2832 * Return value:
2833 * 0 on success
2834 **/
2835static int ipr_biosparam(struct scsi_device *sdev,
2836 struct block_device *block_device,
2837 sector_t capacity, int *parm)
2838{
2839 int heads, sectors;
2840 sector_t cylinders;
2841
2842 heads = 128;
2843 sectors = 32;
2844
2845 cylinders = capacity;
2846 sector_div(cylinders, (128 * 32));
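	/* 128 heads * 32 sectors gives 4096 sectors per cylinder, so
	 * partitions that start on a cylinder boundary start on a multiple of
	 * 4096 sectors -- 4K-aligned with the usual 512-byte logical sectors,
	 * which is the alignment the comment above asks for. */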
2847
2848 /* return result */
2849 parm[0] = heads;
2850 parm[1] = sectors;
2851 parm[2] = cylinders;
2852
2853 return 0;
2854}
2855
2856/**
2857 * ipr_slave_destroy - Unconfigure a SCSI device
2858 * @sdev: scsi device struct
2859 *
2860 * Return value:
2861 * nothing
2862 **/
2863static void ipr_slave_destroy(struct scsi_device *sdev)
2864{
2865 struct ipr_resource_entry *res;
2866 struct ipr_ioa_cfg *ioa_cfg;
2867 unsigned long lock_flags = 0;
2868
2869 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2870
2871 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2872 res = (struct ipr_resource_entry *) sdev->hostdata;
2873 if (res) {
2874 sdev->hostdata = NULL;
2875 res->sdev = NULL;
2876 }
2877 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2878}
2879
2880/**
2881 * ipr_slave_configure - Configure a SCSI device
2882 * @sdev: scsi device struct
2883 *
2884 * This function configures the specified scsi device.
2885 *
2886 * Return value:
2887 * 0 on success
2888 **/
2889static int ipr_slave_configure(struct scsi_device *sdev)
2890{
2891 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2892 struct ipr_resource_entry *res;
2893 unsigned long lock_flags = 0;
2894
2895 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2896 res = sdev->hostdata;
2897 if (res) {
2898 if (ipr_is_af_dasd_device(res))
2899 sdev->type = TYPE_RAID;
0726ce26 2900 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 2901 sdev->scsi_level = 4;
0726ce26
BK
2902 sdev->no_uld_attach = 1;
2903 }
1da177e4
LT
2904 if (ipr_is_vset_device(res)) {
2905 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2906 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2907 }
2908 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2909 sdev->allow_restart = 1;
2910 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2911 }
2912 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2913 return 0;
2914}
2915
2916/**
2917 * ipr_slave_alloc - Prepare for commands to a device.
2918 * @sdev: scsi device struct
2919 *
2920 * This function saves a pointer to the resource entry
2921 * in the scsi device struct if the device exists. We
2922 * can then use this pointer in ipr_queuecommand when
2923 * handling new commands.
2924 *
2925 * Return value:
692aebfc 2926 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
2927 **/
2928static int ipr_slave_alloc(struct scsi_device *sdev)
2929{
2930 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2931 struct ipr_resource_entry *res;
2932 unsigned long lock_flags;
692aebfc 2933 int rc = -ENXIO;
1da177e4
LT
2934
2935 sdev->hostdata = NULL;
2936
2937 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2938
2939 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2940 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2941 (res->cfgte.res_addr.target == sdev->id) &&
2942 (res->cfgte.res_addr.lun == sdev->lun)) {
2943 res->sdev = sdev;
2944 res->add_to_ml = 0;
2945 res->in_erp = 0;
2946 sdev->hostdata = res;
2947 res->needs_sync_complete = 1;
692aebfc 2948 rc = 0;
1da177e4
LT
2949 break;
2950 }
2951 }
2952
2953 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2954
692aebfc 2955 return rc;
1da177e4
LT
2956}
2957
2958/**
2959 * ipr_eh_host_reset - Reset the host adapter
2960 * @scsi_cmd: scsi command struct
2961 *
2962 * Return value:
2963 * SUCCESS / FAILED
2964 **/
df0ae249 2965static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
2966{
2967 struct ipr_ioa_cfg *ioa_cfg;
2968 int rc;
2969
2970 ENTER;
2971 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2972
2973 dev_err(&ioa_cfg->pdev->dev,
2974 "Adapter being reset as a result of error recovery.\n");
2975
2976 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2977 ioa_cfg->sdt_state = GET_DUMP;
2978
2979 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2980
2981 LEAVE;
2982 return rc;
2983}
2984
df0ae249
JG
2985static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
2986{
2987 int rc;
2988
2989 spin_lock_irq(cmd->device->host->host_lock);
2990 rc = __ipr_eh_host_reset(cmd);
2991 spin_unlock_irq(cmd->device->host->host_lock);
2992
2993 return rc;
2994}
2995
1da177e4
LT
2996/**
2997 * ipr_eh_dev_reset - Reset the device
2998 * @scsi_cmd: scsi command struct
2999 *
3000 * This function issues a device reset to the affected device.
3001 * A LUN reset will be sent to the device first. If that does
3002 * not work, a target reset will be sent.
3003 *
3004 * Return value:
3005 * SUCCESS / FAILED
3006 **/
94d0e7b8 3007static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
3008{
3009 struct ipr_cmnd *ipr_cmd;
3010 struct ipr_ioa_cfg *ioa_cfg;
3011 struct ipr_resource_entry *res;
3012 struct ipr_cmd_pkt *cmd_pkt;
3013 u32 ioasc;
3014
3015 ENTER;
3016 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3017 res = scsi_cmd->device->hostdata;
3018
3019 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3020 return FAILED;
3021
3022 /*
3023 * If we are currently going through reset/reload, return failed. This will force the
3024 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3025 * reset to complete
3026 */
3027 if (ioa_cfg->in_reset_reload)
3028 return FAILED;
3029 if (ioa_cfg->ioa_is_dead)
3030 return FAILED;
3031
3032 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3033 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3034 if (ipr_cmd->scsi_cmd)
3035 ipr_cmd->done = ipr_scsi_eh_done;
3036 }
3037 }
3038
3039 res->resetting_device = 1;
3040
3041 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3042
3043 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3044 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3045 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3046 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3047
3048 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3049 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3050
3051 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3052
3053 res->resetting_device = 0;
3054
3055 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3056
3057 LEAVE;
3058 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3059}
3060
94d0e7b8
JG
3061static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3062{
3063 int rc;
3064
3065 spin_lock_irq(cmd->device->host->host_lock);
3066 rc = __ipr_eh_dev_reset(cmd);
3067 spin_unlock_irq(cmd->device->host->host_lock);
3068
3069 return rc;
3070}
3071
1da177e4
LT
3072/**
3073 * ipr_bus_reset_done - Op done function for bus reset.
3074 * @ipr_cmd: ipr command struct
3075 *
3076 * This function is the op done function for a bus reset
3077 *
3078 * Return value:
3079 * none
3080 **/
3081static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3082{
3083 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3084 struct ipr_resource_entry *res;
3085
3086 ENTER;
3087 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3088 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3089 sizeof(res->cfgte.res_handle))) {
3090 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3091 break;
3092 }
3093 }
3094
3095 /*
3096 * If abort has not completed, indicate the reset has, else call the
3097 * abort's done function to wake the sleeping eh thread
3098 */
3099 if (ipr_cmd->sibling->sibling)
3100 ipr_cmd->sibling->sibling = NULL;
3101 else
3102 ipr_cmd->sibling->done(ipr_cmd->sibling);
3103
3104 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3105 LEAVE;
3106}
3107
3108/**
3109 * ipr_abort_timeout - An abort task has timed out
3110 * @ipr_cmd: ipr command struct
3111 *
3112 * This function handles when an abort task times out. If this
3113 * happens we issue a bus reset since we have resources tied
3114 * up that must be freed before returning to the midlayer.
3115 *
3116 * Return value:
3117 * none
3118 **/
3119static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3120{
3121 struct ipr_cmnd *reset_cmd;
3122 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3123 struct ipr_cmd_pkt *cmd_pkt;
3124 unsigned long lock_flags = 0;
3125
3126 ENTER;
3127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3128 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3129 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3130 return;
3131 }
3132
3133 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3134 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3135 ipr_cmd->sibling = reset_cmd;
3136 reset_cmd->sibling = ipr_cmd;
3137 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3138 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3139 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3140 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3141 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3142
3143 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3145 LEAVE;
3146}
3147
3148/**
3149 * ipr_cancel_op - Cancel specified op
3150 * @scsi_cmd: scsi command struct
3151 *
3152 * This function cancels specified op.
3153 *
3154 * Return value:
3155 * SUCCESS / FAILED
3156 **/
3157static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3158{
3159 struct ipr_cmnd *ipr_cmd;
3160 struct ipr_ioa_cfg *ioa_cfg;
3161 struct ipr_resource_entry *res;
3162 struct ipr_cmd_pkt *cmd_pkt;
3163 u32 ioasc;
3164 int op_found = 0;
3165
3166 ENTER;
3167 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3168 res = scsi_cmd->device->hostdata;
3169
8fa728a2
JG
3170 /* If we are currently going through reset/reload, return failed.
3171 * This will force the mid-layer to call ipr_eh_host_reset,
3172 * which will then go to sleep and wait for the reset to complete
3173 */
3174 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3175 return FAILED;
1da177e4
LT
3176 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3177 return FAILED;
3178
3179 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3180 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3181 ipr_cmd->done = ipr_scsi_eh_done;
3182 op_found = 1;
3183 break;
3184 }
3185 }
3186
3187 if (!op_found)
3188 return SUCCESS;
3189
3190 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3191 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3192 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3193 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3194 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3195 ipr_cmd->u.sdev = scsi_cmd->device;
3196
3197 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3198 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3199 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3200
3201 /*
3202 * If the abort task timed out and we sent a bus reset, we will get
 3203 * one of the following responses to the abort
3204 */
3205 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3206 ioasc = 0;
3207 ipr_trace;
3208 }
3209
3210 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3211 res->needs_sync_complete = 1;
3212
3213 LEAVE;
3214 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3215}
3216
3217/**
3218 * ipr_eh_abort - Abort a single op
3219 * @scsi_cmd: scsi command struct
3220 *
3221 * Return value:
3222 * SUCCESS / FAILED
3223 **/
3224static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3225{
8fa728a2
JG
3226 unsigned long flags;
3227 int rc;
1da177e4
LT
3228
3229 ENTER;
1da177e4 3230
8fa728a2
JG
3231 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3232 rc = ipr_cancel_op(scsi_cmd);
3233 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
3234
3235 LEAVE;
8fa728a2 3236 return rc;
1da177e4
LT
3237}
3238
3239/**
3240 * ipr_handle_other_interrupt - Handle "other" interrupts
3241 * @ioa_cfg: ioa config struct
3242 * @int_reg: interrupt register
3243 *
3244 * Return value:
3245 * IRQ_NONE / IRQ_HANDLED
3246 **/
3247static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3248 volatile u32 int_reg)
3249{
3250 irqreturn_t rc = IRQ_HANDLED;
3251
3252 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3253 /* Mask the interrupt */
3254 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3255
3256 /* Clear the interrupt */
3257 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3258 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3259
3260 list_del(&ioa_cfg->reset_cmd->queue);
3261 del_timer(&ioa_cfg->reset_cmd->timer);
3262 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3263 } else {
3264 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3265 ioa_cfg->ioa_unit_checked = 1;
3266 else
3267 dev_err(&ioa_cfg->pdev->dev,
3268 "Permanent IOA failure. 0x%08X\n", int_reg);
3269
3270 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3271 ioa_cfg->sdt_state = GET_DUMP;
3272
3273 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3274 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3275 }
3276
3277 return rc;
3278}
3279
3280/**
3281 * ipr_isr - Interrupt service routine
3282 * @irq: irq number
3283 * @devp: pointer to ioa config struct
3284 * @regs: pt_regs struct
3285 *
3286 * Return value:
3287 * IRQ_NONE / IRQ_HANDLED
3288 **/
3289static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3290{
3291 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3292 unsigned long lock_flags = 0;
3293 volatile u32 int_reg, int_mask_reg;
3294 u32 ioasc;
3295 u16 cmd_index;
3296 struct ipr_cmnd *ipr_cmd;
3297 irqreturn_t rc = IRQ_NONE;
3298
3299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300
3301 /* If interrupts are disabled, ignore the interrupt */
3302 if (!ioa_cfg->allow_interrupts) {
3303 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3304 return IRQ_NONE;
3305 }
3306
3307 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3308 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3309
3310 /* If an interrupt on the adapter did not occur, ignore it */
3311 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3313 return IRQ_NONE;
3314 }
3315
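	/* The host RRQ is consumed as a ring: an entry belongs to the host
	 * while its toggle bit matches ioa_cfg->toggle_bit, and when the
	 * driver wraps from hrrq_end back to hrrq_start it flips its own
	 * toggle_bit so stale entries from the previous pass are ignored. */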
3316 while (1) {
3317 ipr_cmd = NULL;
3318
3319 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3320 ioa_cfg->toggle_bit) {
3321
3322 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3323 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3324
3325 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3326 ioa_cfg->errors_logged++;
3327 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3328
3329 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3330 ioa_cfg->sdt_state = GET_DUMP;
3331
3332 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3334 return IRQ_HANDLED;
3335 }
3336
3337 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3338
3339 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3340
3341 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3342
3343 list_del(&ipr_cmd->queue);
3344 del_timer(&ipr_cmd->timer);
3345 ipr_cmd->done(ipr_cmd);
3346
3347 rc = IRQ_HANDLED;
3348
3349 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3350 ioa_cfg->hrrq_curr++;
3351 } else {
3352 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3353 ioa_cfg->toggle_bit ^= 1u;
3354 }
3355 }
3356
3357 if (ipr_cmd != NULL) {
3358 /* Clear the PCI interrupt */
3359 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3360 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3361 } else
3362 break;
3363 }
3364
3365 if (unlikely(rc == IRQ_NONE))
3366 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3367
3368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369 return rc;
3370}
3371
3372/**
3373 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3374 * @ioa_cfg: ioa config struct
3375 * @ipr_cmd: ipr command struct
3376 *
3377 * Return value:
3378 * 0 on success / -1 on failure
3379 **/
3380static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3381 struct ipr_cmnd *ipr_cmd)
3382{
3383 int i;
3384 struct scatterlist *sglist;
3385 u32 length;
3386 u32 ioadl_flags = 0;
3387 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3388 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3389 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3390
3391 length = scsi_cmd->request_bufflen;
3392
3393 if (length == 0)
3394 return 0;
3395
3396 if (scsi_cmd->use_sg) {
3397 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3398 scsi_cmd->request_buffer,
3399 scsi_cmd->use_sg,
3400 scsi_cmd->sc_data_direction);
3401
3402 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3403 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3404 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3405 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3406 ioarcb->write_ioadl_len =
3407 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3408 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3409 ioadl_flags = IPR_IOADL_FLAGS_READ;
3410 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3411 ioarcb->read_ioadl_len =
3412 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3413 }
3414
3415 sglist = scsi_cmd->request_buffer;
3416
3417 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3418 ioadl[i].flags_and_data_len =
3419 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3420 ioadl[i].address =
3421 cpu_to_be32(sg_dma_address(&sglist[i]));
3422 }
3423
3424 if (likely(ipr_cmd->dma_use_sg)) {
3425 ioadl[i-1].flags_and_data_len |=
3426 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3427 return 0;
3428 } else
3429 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3430 } else {
3431 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3432 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3433 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3434 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3435 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3436 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3437 ioadl_flags = IPR_IOADL_FLAGS_READ;
3438 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3439 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3440 }
3441
3442 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3443 scsi_cmd->request_buffer, length,
3444 scsi_cmd->sc_data_direction);
3445
3446 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3447 ipr_cmd->dma_use_sg = 1;
3448 ioadl[0].flags_and_data_len =
3449 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3450 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3451 return 0;
3452 } else
3453 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3454 }
3455
3456 return -1;
3457}
3458
3459/**
3460 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3461 * @scsi_cmd: scsi command struct
3462 *
3463 * Return value:
3464 * task attributes
3465 **/
3466static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3467{
3468 u8 tag[2];
3469 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3470
3471 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3472 switch (tag[0]) {
3473 case MSG_SIMPLE_TAG:
3474 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3475 break;
3476 case MSG_HEAD_TAG:
3477 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3478 break;
3479 case MSG_ORDERED_TAG:
3480 rc = IPR_FLAGS_LO_ORDERED_TASK;
3481 break;
 3482 }
3483 }
3484
3485 return rc;
3486}
3487
3488/**
3489 * ipr_erp_done - Process completion of ERP for a device
3490 * @ipr_cmd: ipr command struct
3491 *
3492 * This function copies the sense buffer into the scsi_cmd
3493 * struct and pushes the scsi_done function.
3494 *
3495 * Return value:
3496 * nothing
3497 **/
3498static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3499{
3500 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3501 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3502 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3503 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3504
3505 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3506 scsi_cmd->result |= (DID_ERROR << 16);
3507 ipr_sdev_err(scsi_cmd->device,
3508 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3509 } else {
3510 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3511 SCSI_SENSE_BUFFERSIZE);
3512 }
3513
3514 if (res) {
3515 res->needs_sync_complete = 1;
3516 res->in_erp = 0;
3517 }
3518 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3519 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3520 scsi_cmd->scsi_done(scsi_cmd);
3521}
3522
3523/**
3524 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3525 * @ipr_cmd: ipr command struct
3526 *
3527 * Return value:
3528 * none
3529 **/
3530static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3531{
3532 struct ipr_ioarcb *ioarcb;
3533 struct ipr_ioasa *ioasa;
3534
3535 ioarcb = &ipr_cmd->ioarcb;
3536 ioasa = &ipr_cmd->ioasa;
3537
3538 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3539 ioarcb->write_data_transfer_length = 0;
3540 ioarcb->read_data_transfer_length = 0;
3541 ioarcb->write_ioadl_len = 0;
3542 ioarcb->read_ioadl_len = 0;
3543 ioasa->ioasc = 0;
3544 ioasa->residual_data_len = 0;
3545}
3546
3547/**
3548 * ipr_erp_request_sense - Send request sense to a device
3549 * @ipr_cmd: ipr command struct
3550 *
3551 * This function sends a request sense to a device as a result
3552 * of a check condition.
3553 *
3554 * Return value:
3555 * nothing
3556 **/
3557static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3558{
3559 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3560 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3561
3562 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3563 ipr_erp_done(ipr_cmd);
3564 return;
3565 }
3566
3567 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3568
3569 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3570 cmd_pkt->cdb[0] = REQUEST_SENSE;
3571 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3572 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3573 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3574 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3575
3576 ipr_cmd->ioadl[0].flags_and_data_len =
3577 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3578 ipr_cmd->ioadl[0].address =
3579 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3580
3581 ipr_cmd->ioarcb.read_ioadl_len =
3582 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3583 ipr_cmd->ioarcb.read_data_transfer_length =
3584 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3585
3586 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3587 IPR_REQUEST_SENSE_TIMEOUT * 2);
3588}
3589
3590/**
3591 * ipr_erp_cancel_all - Send cancel all to a device
3592 * @ipr_cmd: ipr command struct
3593 *
3594 * This function sends a cancel all to a device to clear the
3595 * queue. If we are running TCQ on the device, QERR is set to 1,
3596 * which means all outstanding ops have been dropped on the floor.
3597 * Cancel all will return them to us.
3598 *
3599 * Return value:
3600 * nothing
3601 **/
3602static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3603{
3604 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3605 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3606 struct ipr_cmd_pkt *cmd_pkt;
3607
3608 res->in_erp = 1;
3609
3610 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3611
3612 if (!scsi_get_tag_type(scsi_cmd->device)) {
3613 ipr_erp_request_sense(ipr_cmd);
3614 return;
3615 }
3616
3617 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3618 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3619 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3620
3621 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3622 IPR_CANCEL_ALL_TIMEOUT);
3623}
3624
3625/**
3626 * ipr_dump_ioasa - Dump contents of IOASA
3627 * @ioa_cfg: ioa config struct
3628 * @ipr_cmd: ipr command struct
3629 *
3630 * This function is invoked by the interrupt handler when ops
3631 * fail. It will log the IOASA if appropriate. Only called
3632 * for GPDD ops.
3633 *
3634 * Return value:
3635 * none
3636 **/
3637static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3638 struct ipr_cmnd *ipr_cmd)
3639{
3640 int i;
3641 u16 data_len;
3642 u32 ioasc;
3643 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3644 __be32 *ioasa_data = (__be32 *)ioasa;
3645 int error_index;
3646
3647 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3648
3649 if (0 == ioasc)
3650 return;
3651
3652 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3653 return;
3654
3655 error_index = ipr_get_error(ioasc);
3656
3657 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3658 /* Don't log an error if the IOA already logged one */
3659 if (ioasa->ilid != 0)
3660 return;
3661
3662 if (ipr_error_table[error_index].log_ioasa == 0)
3663 return;
3664 }
3665
3666 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3667 ipr_error_table[error_index].error);
3668
3669 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3670 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3671 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3672 "Device End state: %s Phase: %s\n",
3673 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3674 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3675 }
3676
3677 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3678 data_len = sizeof(struct ipr_ioasa);
3679 else
3680 data_len = be16_to_cpu(ioasa->ret_stat_len);
3681
3682 ipr_err("IOASA Dump:\n");
3683
3684 for (i = 0; i < data_len / 4; i += 4) {
3685 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3686 be32_to_cpu(ioasa_data[i]),
3687 be32_to_cpu(ioasa_data[i+1]),
3688 be32_to_cpu(ioasa_data[i+2]),
3689 be32_to_cpu(ioasa_data[i+3]));
3690 }
3691}
3692
3693/**
3694 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 3695 * @ipr_cmd: ipr command struct
3697 *
3698 * Return value:
3699 * none
3700 **/
3701static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3702{
3703 u32 failing_lba;
3704 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3705 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3706 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3707 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3708
3709 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3710
3711 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3712 return;
3713
3714 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3715
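	/*
	 * If a vset device reported a media "do not reallocate" error with
	 * a failing LBA that does not fit in 32 bits, build descriptor
	 * format sense data (response code 0x72) with an Information
	 * descriptor carrying the full 64-bit LBA. Otherwise fall back to
	 * fixed format sense data (response code 0x70).
	 */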
3716 if (ipr_is_vset_device(res) &&
3717 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3718 ioasa->u.vset.failing_lba_hi != 0) {
3719 sense_buf[0] = 0x72;
3720 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3721 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3722 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3723
3724 sense_buf[7] = 12;
3725 sense_buf[8] = 0;
3726 sense_buf[9] = 0x0A;
3727 sense_buf[10] = 0x80;
3728
3729 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3730
3731 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3732 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3733 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3734 sense_buf[15] = failing_lba & 0x000000ff;
3735
3736 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3737
3738 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3739 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3740 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3741 sense_buf[19] = failing_lba & 0x000000ff;
3742 } else {
3743 sense_buf[0] = 0x70;
3744 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3745 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3746 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3747
3748 /* Illegal request */
3749 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3750 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3751 sense_buf[7] = 10; /* additional length */
3752
3753 /* IOARCB was in error */
3754 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3755 sense_buf[15] = 0xC0;
3756 else /* Parameter data was invalid */
3757 sense_buf[15] = 0x80;
3758
3759 sense_buf[16] =
3760 ((IPR_FIELD_POINTER_MASK &
3761 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3762 sense_buf[17] =
3763 (IPR_FIELD_POINTER_MASK &
3764 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3765 } else {
3766 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3767 if (ipr_is_vset_device(res))
3768 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3769 else
3770 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3771
3772 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3773 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3774 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3775 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3776 sense_buf[6] = failing_lba & 0x000000ff;
3777 }
3778
3779 sense_buf[7] = 6; /* additional length */
3780 }
3781 }
3782}
3783
3784/**
3785 * ipr_erp_start - Process an error response for a SCSI op
3786 * @ioa_cfg: ioa config struct
3787 * @ipr_cmd: ipr command struct
3788 *
3789 * This function determines whether or not to initiate ERP
3790 * on the affected device.
3791 *
3792 * Return value:
3793 * nothing
3794 **/
3795static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3796 struct ipr_cmnd *ipr_cmd)
3797{
3798 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3799 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3800 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3801
3802 if (!res) {
3803 ipr_scsi_eh_done(ipr_cmd);
3804 return;
3805 }
3806
3807 if (ipr_is_gscsi(res))
3808 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3809 else
3810 ipr_gen_sense(ipr_cmd);
3811
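	/*
	 * Map the adapter IOASC to a mid-layer result. Some statuses only
	 * set a host byte (e.g. DID_NO_CONNECT, DID_ERROR), some also force
	 * a SYNC COMPLETE on the next op to the device, and a device CHECK
	 * CONDITION status kicks off further ERP (cancel all / request
	 * sense) instead of completing the command here.
	 */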
3812 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3813 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3814 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3815 break;
3816 case IPR_IOASC_IR_RESOURCE_HANDLE:
3817 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3818 break;
3819 case IPR_IOASC_HW_SEL_TIMEOUT:
3820 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3821 res->needs_sync_complete = 1;
3822 break;
3823 case IPR_IOASC_SYNC_REQUIRED:
3824 if (!res->in_erp)
3825 res->needs_sync_complete = 1;
3826 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3827 break;
3828 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3829 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3830 break;
3831 case IPR_IOASC_BUS_WAS_RESET:
3832 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3833 /*
3834 * Report the bus reset and ask for a retry. The device
 3835 * will return CC/UA on the next command.
3836 */
3837 if (!res->resetting_device)
3838 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3839 scsi_cmd->result |= (DID_ERROR << 16);
3840 res->needs_sync_complete = 1;
3841 break;
3842 case IPR_IOASC_HW_DEV_BUS_STATUS:
3843 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3844 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3845 ipr_erp_cancel_all(ipr_cmd);
3846 return;
3847 }
3848 res->needs_sync_complete = 1;
3849 break;
3850 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3851 break;
3852 default:
3853 scsi_cmd->result |= (DID_ERROR << 16);
3854 if (!ipr_is_vset_device(res))
3855 res->needs_sync_complete = 1;
3856 break;
3857 }
3858
3859 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3860 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3861 scsi_cmd->scsi_done(scsi_cmd);
3862}
3863
3864/**
3865 * ipr_scsi_done - mid-layer done function
3866 * @ipr_cmd: ipr command struct
3867 *
3868 * This function is invoked by the interrupt handler for
3869 * ops generated by the SCSI mid-layer
3870 *
3871 * Return value:
3872 * none
3873 **/
3874static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3875{
3876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3877 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3878 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3879
3880 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3881
3882 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3883 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3884 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3885 scsi_cmd->scsi_done(scsi_cmd);
3886 } else
3887 ipr_erp_start(ioa_cfg, ipr_cmd);
3888}
3889
3890/**
 3891 * ipr_save_ioafp_mode_select - Save adapter's mode select data
3892 * @ioa_cfg: ioa config struct
3893 * @scsi_cmd: scsi command struct
3894 *
3895 * This function saves mode select data for the adapter to
3896 * use following an adapter reset.
3897 *
3898 * Return value:
3899 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3900 **/
3901static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3902 struct scsi_cmnd *scsi_cmd)
3903{
3904 if (!ioa_cfg->saved_mode_pages) {
3905 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3906 GFP_ATOMIC);
3907 if (!ioa_cfg->saved_mode_pages) {
3908 dev_err(&ioa_cfg->pdev->dev,
3909 "IOA mode select buffer allocation failed\n");
3910 return SCSI_MLQUEUE_HOST_BUSY;
3911 }
3912 }
3913
3914 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3915 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3916 return 0;
3917}
3918
3919/**
3920 * ipr_queuecommand - Queue a mid-layer request
3921 * @scsi_cmd: scsi command struct
3922 * @done: done function
3923 *
3924 * This function queues a request generated by the mid-layer.
3925 *
3926 * Return value:
3927 * 0 on success
3928 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3929 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3930 **/
3931static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3932 void (*done) (struct scsi_cmnd *))
3933{
3934 struct ipr_ioa_cfg *ioa_cfg;
3935 struct ipr_resource_entry *res;
3936 struct ipr_ioarcb *ioarcb;
3937 struct ipr_cmnd *ipr_cmd;
3938 int rc = 0;
3939
3940 scsi_cmd->scsi_done = done;
3941 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3942 res = scsi_cmd->device->hostdata;
3943 scsi_cmd->result = (DID_OK << 16);
3944
3945 /*
 3946 * We are currently blocking all devices due to a host reset.
3947 * We have told the host to stop giving us new requests, but
3948 * ERP ops don't count. FIXME
3949 */
3950 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3951 return SCSI_MLQUEUE_HOST_BUSY;
3952
3953 /*
3954 * FIXME - Create scsi_set_host_offline interface
3955 * and the ioa_is_dead check can be removed
3956 */
3957 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
3958 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3959 scsi_cmd->result = (DID_NO_CONNECT << 16);
3960 scsi_cmd->scsi_done(scsi_cmd);
3961 return 0;
3962 }
3963
3964 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3965 ioarcb = &ipr_cmd->ioarcb;
3966 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3967
3968 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3969 ipr_cmd->scsi_cmd = scsi_cmd;
3970 ioarcb->res_handle = res->cfgte.res_handle;
3971 ipr_cmd->done = ipr_scsi_done;
3972 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3973
3974 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
3975 if (scsi_cmd->underflow == 0)
3976 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3977
3978 if (res->needs_sync_complete) {
3979 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3980 res->needs_sync_complete = 0;
3981 }
3982
3983 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3984 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3985 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3986 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
3987 }
3988
3989 if (scsi_cmd->cmnd[0] >= 0xC0 &&
3990 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
3991 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3992
3993 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3994 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3995
3996 if (likely(rc == 0))
3997 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3998
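	/*
	 * The barrier below orders the IOARCB/IOADL writes ahead of the
	 * MMIO write to the IOARRIN register, so the adapter never fetches
	 * a stale command block.
	 */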
3999 if (likely(rc == 0)) {
4000 mb();
4001 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4002 ioa_cfg->regs.ioarrin_reg);
4003 } else {
4004 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4005 return SCSI_MLQUEUE_HOST_BUSY;
4006 }
4007
4008 return 0;
4009}
4010
4011/**
 4012 * ipr_ioa_info - Get information about the card/driver
 4013 * @host: scsi host struct
4014 *
4015 * Return value:
4016 * pointer to buffer with description string
4017 **/
4018static const char * ipr_ioa_info(struct Scsi_Host *host)
4019{
4020 static char buffer[512];
4021 struct ipr_ioa_cfg *ioa_cfg;
4022 unsigned long lock_flags = 0;
4023
4024 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4025
4026 spin_lock_irqsave(host->host_lock, lock_flags);
4027 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4028 spin_unlock_irqrestore(host->host_lock, lock_flags);
4029
4030 return buffer;
4031}
4032
4033static struct scsi_host_template driver_template = {
4034 .module = THIS_MODULE,
4035 .name = "IPR",
4036 .info = ipr_ioa_info,
4037 .queuecommand = ipr_queuecommand,
4038 .eh_abort_handler = ipr_eh_abort,
4039 .eh_device_reset_handler = ipr_eh_dev_reset,
4040 .eh_host_reset_handler = ipr_eh_host_reset,
4041 .slave_alloc = ipr_slave_alloc,
4042 .slave_configure = ipr_slave_configure,
4043 .slave_destroy = ipr_slave_destroy,
4044 .change_queue_depth = ipr_change_queue_depth,
4045 .change_queue_type = ipr_change_queue_type,
4046 .bios_param = ipr_biosparam,
4047 .can_queue = IPR_MAX_COMMANDS,
4048 .this_id = -1,
4049 .sg_tablesize = IPR_MAX_SGLIST,
4050 .max_sectors = IPR_IOA_MAX_SECTORS,
4051 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4052 .use_clustering = ENABLE_CLUSTERING,
4053 .shost_attrs = ipr_ioa_attrs,
4054 .sdev_attrs = ipr_dev_attrs,
4055 .proc_name = IPR_NAME
4056};
4057
4058#ifdef CONFIG_PPC_PSERIES
4059static const u16 ipr_blocked_processors[] = {
4060 PV_NORTHSTAR,
4061 PV_PULSAR,
4062 PV_POWER4,
4063 PV_ICESTAR,
4064 PV_SSTAR,
4065 PV_POWER4p,
4066 PV_630,
4067 PV_630p
4068};
4069
4070/**
4071 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4072 * @ioa_cfg: ioa cfg struct
4073 *
4074 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4075 * certain pSeries hardware. This function determines if the given
 4076 * adapter is in one of these configurations or not.
4077 *
4078 * Return value:
4079 * 1 if adapter is not supported / 0 if adapter is supported
4080 **/
4081static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4082{
4083 u8 rev_id;
4084 int i;
4085
4086 if (ioa_cfg->type == 0x5702) {
4087 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4088 &rev_id) == PCIBIOS_SUCCESSFUL) {
4089 if (rev_id < 4) {
4090 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4091 if (__is_processor(ipr_blocked_processors[i]))
4092 return 1;
4093 }
4094 }
4095 }
4096 }
4097 return 0;
4098}
4099#else
4100#define ipr_invalid_adapter(ioa_cfg) 0
4101#endif
4102
4103/**
4104 * ipr_ioa_bringdown_done - IOA bring down completion.
4105 * @ipr_cmd: ipr command struct
4106 *
4107 * This function processes the completion of an adapter bring down.
4108 * It wakes any reset sleepers.
4109 *
4110 * Return value:
4111 * IPR_RC_JOB_RETURN
4112 **/
4113static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4114{
4115 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4116
4117 ENTER;
4118 ioa_cfg->in_reset_reload = 0;
4119 ioa_cfg->reset_retries = 0;
4120 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4121 wake_up_all(&ioa_cfg->reset_wait_q);
4122
4123 spin_unlock_irq(ioa_cfg->host->host_lock);
4124 scsi_unblock_requests(ioa_cfg->host);
4125 spin_lock_irq(ioa_cfg->host->host_lock);
4126 LEAVE;
4127
4128 return IPR_RC_JOB_RETURN;
4129}
4130
4131/**
4132 * ipr_ioa_reset_done - IOA reset completion.
4133 * @ipr_cmd: ipr command struct
4134 *
4135 * This function processes the completion of an adapter reset.
4136 * It schedules any necessary mid-layer add/removes and
4137 * wakes any reset sleepers.
4138 *
4139 * Return value:
4140 * IPR_RC_JOB_RETURN
4141 **/
4142static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4143{
4144 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4145 struct ipr_resource_entry *res;
4146 struct ipr_hostrcb *hostrcb, *temp;
4147 int i = 0;
4148
4149 ENTER;
4150 ioa_cfg->in_reset_reload = 0;
4151 ioa_cfg->allow_cmds = 1;
4152 ioa_cfg->reset_cmd = NULL;
4153
4154 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4155 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4156 ipr_trace;
4157 break;
4158 }
4159 }
4160 schedule_work(&ioa_cfg->work_q);
4161
4162 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4163 list_del(&hostrcb->queue);
4164 if (i++ < IPR_NUM_LOG_HCAMS)
4165 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4166 else
4167 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4168 }
4169
4170 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4171
4172 ioa_cfg->reset_retries = 0;
4173 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4174 wake_up_all(&ioa_cfg->reset_wait_q);
4175
4176 spin_unlock_irq(ioa_cfg->host->host_lock);
4177 scsi_unblock_requests(ioa_cfg->host);
4178 spin_lock_irq(ioa_cfg->host->host_lock);
4179
4180 if (!ioa_cfg->allow_cmds)
4181 scsi_block_requests(ioa_cfg->host);
4182
4183 LEAVE;
4184 return IPR_RC_JOB_RETURN;
4185}
4186
4187/**
4188 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4189 * @supported_dev: supported device struct
4190 * @vpids: vendor product id struct
4191 *
4192 * Return value:
4193 * none
4194 **/
4195static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4196 struct ipr_std_inq_vpids *vpids)
4197{
4198 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4199 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4200 supported_dev->num_records = 1;
4201 supported_dev->data_length =
4202 cpu_to_be16(sizeof(struct ipr_supported_device));
4203 supported_dev->reserved = 0;
4204}
4205
4206/**
4207 * ipr_set_supported_devs - Send Set Supported Devices for a device
4208 * @ipr_cmd: ipr command struct
4209 *
 4210 * This function sends a Set Supported Devices to the adapter.
4211 *
4212 * Return value:
4213 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4214 **/
4215static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4216{
4217 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4218 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4219 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4220 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4221 struct ipr_resource_entry *res = ipr_cmd->u.res;
4222
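	/*
	 * Walk the used resource list and issue one Set Supported Devices
	 * per DASD. Each request returns IPR_RC_JOB_RETURN and re-enters
	 * this step on completion until the list is exhausted, at which
	 * point the reset job continues with ipr_ioa_reset_done.
	 */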
4223 ipr_cmd->job_step = ipr_ioa_reset_done;
4224
4225 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
d0ad6f50 4226 if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4227 continue;
4228
4229 ipr_cmd->u.res = res;
4230 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4231
4232 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4233 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4234 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4235
4236 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4237 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4238 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4239
4240 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4241 sizeof(struct ipr_supported_device));
4242 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4243 offsetof(struct ipr_misc_cbs, supp_dev));
4244 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4245 ioarcb->write_data_transfer_length =
4246 cpu_to_be32(sizeof(struct ipr_supported_device));
4247
4248 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4249 IPR_SET_SUP_DEVICE_TIMEOUT);
4250
4251 ipr_cmd->job_step = ipr_set_supported_devs;
4252 return IPR_RC_JOB_RETURN;
4253 }
4254
4255 return IPR_RC_JOB_CONTINUE;
4256}
4257
4258/**
4259 * ipr_setup_write_cache - Disable write cache if needed
4260 * @ipr_cmd: ipr command struct
4261 *
 4262 * This function sets up the adapter's write cache to the desired setting.
4263 *
4264 * Return value:
4265 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4266 **/
4267static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4268{
4269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4270
4271 ipr_cmd->job_step = ipr_set_supported_devs;
4272 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4273 struct ipr_resource_entry, queue);
4274
4275 if (ioa_cfg->cache_state != CACHE_DISABLED)
4276 return IPR_RC_JOB_CONTINUE;
4277
4278 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4279 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4280 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4281 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4282
4283 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4284
4285 return IPR_RC_JOB_RETURN;
4286}
4287
4288/**
4289 * ipr_get_mode_page - Locate specified mode page
4290 * @mode_pages: mode page buffer
4291 * @page_code: page code to find
4292 * @len: minimum required length for mode page
4293 *
4294 * Return value:
4295 * pointer to mode page / NULL on failure
4296 **/
4297static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4298 u32 page_code, u32 len)
4299{
4300 struct ipr_mode_page_hdr *mode_hdr;
4301 u32 page_length;
4302 u32 length;
4303
4304 if (!mode_pages || (mode_pages->hdr.length == 0))
4305 return NULL;
4306
4307 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4308 mode_hdr = (struct ipr_mode_page_hdr *)
4309 (mode_pages->data + mode_pages->hdr.block_desc_len);
4310
4311 while (length) {
4312 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4313 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4314 return mode_hdr;
4315 break;
4316 } else {
4317 page_length = (sizeof(struct ipr_mode_page_hdr) +
4318 mode_hdr->page_length);
4319 length -= page_length;
4320 mode_hdr = (struct ipr_mode_page_hdr *)
4321 ((unsigned long)mode_hdr + page_length);
4322 }
4323 }
4324 return NULL;
4325}
4326
4327/**
4328 * ipr_check_term_power - Check for term power errors
4329 * @ioa_cfg: ioa config struct
4330 * @mode_pages: IOAFP mode pages buffer
4331 *
4332 * Check the IOAFP's mode page 28 for term power errors
4333 *
4334 * Return value:
4335 * nothing
4336 **/
4337static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4338 struct ipr_mode_pages *mode_pages)
4339{
4340 int i;
4341 int entry_length;
4342 struct ipr_dev_bus_entry *bus;
4343 struct ipr_mode_page28 *mode_page;
4344
4345 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4346 sizeof(struct ipr_mode_page28));
4347
4348 entry_length = mode_page->entry_length;
4349
4350 bus = mode_page->bus;
4351
4352 for (i = 0; i < mode_page->num_entries; i++) {
4353 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4354 dev_err(&ioa_cfg->pdev->dev,
4355 "Term power is absent on scsi bus %d\n",
4356 bus->res_addr.bus);
4357 }
4358
4359 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4360 }
4361}
4362
4363/**
4364 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4365 * @ioa_cfg: ioa config struct
4366 *
4367 * Looks through the config table checking for SES devices. If
4368 * the SES device is in the SES table indicating a maximum SCSI
4369 * bus speed, the speed is limited for the bus.
4370 *
4371 * Return value:
4372 * none
4373 **/
4374static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4375{
4376 u32 max_xfer_rate;
4377 int i;
4378
4379 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4380 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4381 ioa_cfg->bus_attr[i].bus_width);
4382
4383 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4384 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4385 }
4386}
4387
4388/**
4389 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4390 * @ioa_cfg: ioa config struct
4391 * @mode_pages: mode page 28 buffer
4392 *
4393 * Updates mode page 28 based on driver configuration
4394 *
4395 * Return value:
4396 * none
4397 **/
4398static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4399 struct ipr_mode_pages *mode_pages)
4400{
4401 int i, entry_length;
4402 struct ipr_dev_bus_entry *bus;
4403 struct ipr_bus_attributes *bus_attr;
4404 struct ipr_mode_page28 *mode_page;
4405
4406 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4407 sizeof(struct ipr_mode_page28));
4408
4409 entry_length = mode_page->entry_length;
4410
4411 /* Loop for each device bus entry */
4412 for (i = 0, bus = mode_page->bus;
4413 i < mode_page->num_entries;
4414 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4415 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4416 dev_err(&ioa_cfg->pdev->dev,
4417 "Invalid resource address reported: 0x%08X\n",
4418 IPR_GET_PHYS_LOC(bus->res_addr));
4419 continue;
4420 }
4421
4422 bus_attr = &ioa_cfg->bus_attr[i];
4423 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4424 bus->bus_width = bus_attr->bus_width;
4425 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4426 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4427 if (bus_attr->qas_enabled)
4428 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4429 else
4430 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4431 }
4432}
4433
4434/**
4435 * ipr_build_mode_select - Build a mode select command
4436 * @ipr_cmd: ipr command struct
4437 * @res_handle: resource handle to send command to
 4438 * @parm: Byte 1 of the Mode Select CDB
4439 * @dma_addr: DMA buffer address
4440 * @xfer_len: data transfer length
4441 *
4442 * Return value:
4443 * none
4444 **/
4445static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4446 __be32 res_handle, u8 parm, u32 dma_addr,
4447 u8 xfer_len)
4448{
4449 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4450 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4451
4452 ioarcb->res_handle = res_handle;
4453 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4454 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4455 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4456 ioarcb->cmd_pkt.cdb[1] = parm;
4457 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4458
4459 ioadl->flags_and_data_len =
4460 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4461 ioadl->address = cpu_to_be32(dma_addr);
4462 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4463 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4464}
4465
4466/**
4467 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4468 * @ipr_cmd: ipr command struct
4469 *
4470 * This function sets up the SCSI bus attributes and sends
4471 * a Mode Select for Page 28 to activate them.
4472 *
4473 * Return value:
4474 * IPR_RC_JOB_RETURN
4475 **/
4476static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4477{
4478 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4479 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4480 int length;
4481
4482 ENTER;
4483 if (ioa_cfg->saved_mode_pages) {
4484 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4485 ioa_cfg->saved_mode_page_len);
4486 length = ioa_cfg->saved_mode_page_len;
4487 } else {
4488 ipr_scsi_bus_speed_limit(ioa_cfg);
4489 ipr_check_term_power(ioa_cfg, mode_pages);
4490 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4491 length = mode_pages->hdr.length + 1;
4492 mode_pages->hdr.length = 0;
4493 }
4494
4495 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4496 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4497 length);
4498
62275040 4499 ipr_cmd->job_step = ipr_setup_write_cache;
4500 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4501
4502 LEAVE;
4503 return IPR_RC_JOB_RETURN;
4504}
4505
4506/**
4507 * ipr_build_mode_sense - Builds a mode sense command
4508 * @ipr_cmd: ipr command struct
 4509 * @res_handle: resource handle to send command to
4510 * @parm: Byte 2 of mode sense command
4511 * @dma_addr: DMA address of mode sense buffer
4512 * @xfer_len: Size of DMA buffer
4513 *
4514 * Return value:
4515 * none
4516 **/
4517static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4518 __be32 res_handle,
4519 u8 parm, u32 dma_addr, u8 xfer_len)
4520{
4521 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4522 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4523
4524 ioarcb->res_handle = res_handle;
4525 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4526 ioarcb->cmd_pkt.cdb[2] = parm;
4527 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4528 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4529
4530 ioadl->flags_and_data_len =
4531 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4532 ioadl->address = cpu_to_be32(dma_addr);
4533 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4534 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4535}
4536
4537/**
4538 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4539 * @ipr_cmd: ipr command struct
4540 *
 4541 * This function sends a Page 28 mode sense to the IOA to
4542 * retrieve SCSI bus attributes.
4543 *
4544 * Return value:
4545 * IPR_RC_JOB_RETURN
4546 **/
4547static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4548{
4549 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4550
4551 ENTER;
4552 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4553 0x28, ioa_cfg->vpd_cbs_dma +
4554 offsetof(struct ipr_misc_cbs, mode_pages),
4555 sizeof(struct ipr_mode_pages));
4556
4557 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4558
4559 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4560
4561 LEAVE;
4562 return IPR_RC_JOB_RETURN;
4563}
4564
4565/**
4566 * ipr_init_res_table - Initialize the resource table
4567 * @ipr_cmd: ipr command struct
4568 *
4569 * This function looks through the existing resource table, comparing
4570 * it with the config table. This function will take care of old/new
4571 * devices and schedule adding/removing them from the mid-layer
4572 * as appropriate.
4573 *
4574 * Return value:
4575 * IPR_RC_JOB_CONTINUE
4576 **/
4577static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4578{
4579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4580 struct ipr_resource_entry *res, *temp;
4581 struct ipr_config_table_entry *cfgte;
4582 int found, i;
4583 LIST_HEAD(old_res);
4584
4585 ENTER;
4586 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4587 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4588
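	/*
	 * Reconcile the driver's resource list with the adapter's config
	 * table: existing entries are moved to old_res, entries still
	 * present in the new table are moved back, new devices are pulled
	 * from the free list and flagged add_to_ml, and anything left in
	 * old_res is either flagged del_from_ml or returned to the free list.
	 */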
4589 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4590 list_move_tail(&res->queue, &old_res);
4591
4592 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4593 cfgte = &ioa_cfg->cfg_table->dev[i];
4594 found = 0;
4595
4596 list_for_each_entry_safe(res, temp, &old_res, queue) {
4597 if (!memcmp(&res->cfgte.res_addr,
4598 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4599 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4600 found = 1;
4601 break;
4602 }
4603 }
4604
4605 if (!found) {
4606 if (list_empty(&ioa_cfg->free_res_q)) {
4607 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4608 break;
4609 }
4610
4611 found = 1;
4612 res = list_entry(ioa_cfg->free_res_q.next,
4613 struct ipr_resource_entry, queue);
4614 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4615 ipr_init_res_entry(res);
4616 res->add_to_ml = 1;
4617 }
4618
4619 if (found)
4620 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4621 }
4622
4623 list_for_each_entry_safe(res, temp, &old_res, queue) {
4624 if (res->sdev) {
4625 res->del_from_ml = 1;
4626 res->sdev->hostdata = NULL;
4627 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4628 } else {
4629 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4630 }
4631 }
4632
4633 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4634
4635 LEAVE;
4636 return IPR_RC_JOB_CONTINUE;
4637}
4638
4639/**
4640 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4641 * @ipr_cmd: ipr command struct
4642 *
4643 * This function sends a Query IOA Configuration command
4644 * to the adapter to retrieve the IOA configuration table.
4645 *
4646 * Return value:
4647 * IPR_RC_JOB_RETURN
4648 **/
4649static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4650{
4651 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4652 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4653 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4654 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4655
4656 ENTER;
4657 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4658 ucode_vpd->major_release, ucode_vpd->card_type,
4659 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4660 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4661 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4662
4663 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4664 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4665 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4666
4667 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4668 ioarcb->read_data_transfer_length =
4669 cpu_to_be32(sizeof(struct ipr_config_table));
4670
4671 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4672 ioadl->flags_and_data_len =
4673 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4674
4675 ipr_cmd->job_step = ipr_init_res_table;
4676
4677 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4678
4679 LEAVE;
4680 return IPR_RC_JOB_RETURN;
4681}
4682
4683/**
4684 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4685 * @ipr_cmd: ipr command struct
4686 *
4687 * This utility function sends an inquiry to the adapter.
4688 *
4689 * Return value:
4690 * none
4691 **/
4692static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4693 u32 dma_addr, u8 xfer_len)
4694{
4695 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4696 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4697
4698 ENTER;
4699 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4700 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4701
4702 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4703 ioarcb->cmd_pkt.cdb[1] = flags;
4704 ioarcb->cmd_pkt.cdb[2] = page;
4705 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4706
4707 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4708 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4709
4710 ioadl->address = cpu_to_be32(dma_addr);
4711 ioadl->flags_and_data_len =
4712 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4713
4714 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4715 LEAVE;
4716}
4717
4718/**
4719 * ipr_inquiry_page_supported - Is the given inquiry page supported
4720 * @page0: inquiry page 0 buffer
4721 * @page: page code.
4722 *
4723 * This function determines if the specified inquiry page is supported.
4724 *
4725 * Return value:
4726 * 1 if page is supported / 0 if not
4727 **/
4728static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
4729{
4730 int i;
4731
4732 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
4733 if (page0->page[i] == page)
4734 return 1;
4735
4736 return 0;
4737}
4738
4739/**
4740 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4741 * @ipr_cmd: ipr command struct
4742 *
4743 * This function sends a Page 3 inquiry to the adapter
4744 * to retrieve software VPD information.
4745 *
4746 * Return value:
4747 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4748 **/
4749static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4750{
4751 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4752 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
4753
4754 ENTER;
4755
4756 if (!ipr_inquiry_page_supported(page0, 1))
4757 ioa_cfg->cache_state = CACHE_NONE;
4758
4759 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4760
4761 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4762 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4763 sizeof(struct ipr_inquiry_page3));
4764
4765 LEAVE;
4766 return IPR_RC_JOB_RETURN;
4767}
4768
4769/**
4770 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
4771 * @ipr_cmd: ipr command struct
4772 *
4773 * This function sends a Page 0 inquiry to the adapter
4774 * to retrieve supported inquiry pages.
4775 *
4776 * Return value:
4777 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4778 **/
4779static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
4780{
4781 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4782 char type[5];
4783
4784 ENTER;
4785
4786 /* Grab the type out of the VPD and store it away */
4787 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4788 type[4] = '\0';
4789 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4790
62275040 4791 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 4792
4793 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
4794 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
4795 sizeof(struct ipr_inquiry_page0));
4796
4797 LEAVE;
4798 return IPR_RC_JOB_RETURN;
4799}
4800
4801/**
4802 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4803 * @ipr_cmd: ipr command struct
4804 *
4805 * This function sends a standard inquiry to the adapter.
4806 *
4807 * Return value:
4808 * IPR_RC_JOB_RETURN
4809 **/
4810static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4811{
4812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4813
4814 ENTER;
62275040 4815 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
4816
4817 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4818 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4819 sizeof(struct ipr_ioa_vpd));
4820
4821 LEAVE;
4822 return IPR_RC_JOB_RETURN;
4823}
4824
4825/**
4826 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4827 * @ipr_cmd: ipr command struct
4828 *
 4829 * This function sends an Identify Host Request Response Queue
4830 * command to establish the HRRQ with the adapter.
4831 *
4832 * Return value:
4833 * IPR_RC_JOB_RETURN
4834 **/
4835static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4836{
4837 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4838 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4839
4840 ENTER;
4841 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4842
4843 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4844 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4845
4846 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
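	/*
	 * CDB bytes 2-5 carry the host RRQ DMA address (most significant
	 * byte first) and bytes 7-8 the queue size in bytes.
	 */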
4847 ioarcb->cmd_pkt.cdb[2] =
4848 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4849 ioarcb->cmd_pkt.cdb[3] =
4850 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4851 ioarcb->cmd_pkt.cdb[4] =
4852 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4853 ioarcb->cmd_pkt.cdb[5] =
4854 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4855 ioarcb->cmd_pkt.cdb[7] =
4856 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4857 ioarcb->cmd_pkt.cdb[8] =
4858 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4859
4860 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4861
4862 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4863
4864 LEAVE;
4865 return IPR_RC_JOB_RETURN;
4866}
4867
4868/**
4869 * ipr_reset_timer_done - Adapter reset timer function
4870 * @ipr_cmd: ipr command struct
4871 *
4872 * Description: This function is used in adapter reset processing
4873 * for timing events. If the reset_cmd pointer in the IOA
 4874 * config struct no longer points to this command, we are doing nested
4875 * resets and fail_all_ops will take care of freeing the
4876 * command block.
4877 *
4878 * Return value:
4879 * none
4880 **/
4881static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4882{
4883 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4884 unsigned long lock_flags = 0;
4885
4886 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4887
4888 if (ioa_cfg->reset_cmd == ipr_cmd) {
4889 list_del(&ipr_cmd->queue);
4890 ipr_cmd->done(ipr_cmd);
4891 }
4892
4893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4894}
4895
4896/**
4897 * ipr_reset_start_timer - Start a timer for adapter reset job
4898 * @ipr_cmd: ipr command struct
4899 * @timeout: timeout value
4900 *
4901 * Description: This function is used in adapter reset processing
4902 * for timing events. If the reset_cmd pointer in the IOA
 4903 * config struct no longer points to this command, we are doing nested
4904 * resets and fail_all_ops will take care of freeing the
4905 * command block.
4906 *
4907 * Return value:
4908 * none
4909 **/
4910static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4911 unsigned long timeout)
4912{
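	/*
	 * Park the command on the pending queue while the timer runs. When
	 * it fires, ipr_reset_timer_done only invokes ->done (set below to
	 * ipr_reset_ioa_job) if this is still the active reset command.
	 */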
4913 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4914 ipr_cmd->done = ipr_reset_ioa_job;
4915
4916 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4917 ipr_cmd->timer.expires = jiffies + timeout;
4918 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4919 add_timer(&ipr_cmd->timer);
4920}
4921
4922/**
4923 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4924 * @ioa_cfg: ioa cfg struct
4925 *
4926 * Return value:
4927 * nothing
4928 **/
4929static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4930{
4931 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4932
4933 /* Initialize Host RRQ pointers */
4934 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4935 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4936 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4937 ioa_cfg->toggle_bit = 1;
4938
4939 /* Zero out config table */
4940 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4941}
4942
4943/**
4944 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4945 * @ipr_cmd: ipr command struct
4946 *
4947 * This function reinitializes some control blocks and
4948 * enables destructive diagnostics on the adapter.
4949 *
4950 * Return value:
4951 * IPR_RC_JOB_RETURN
4952 **/
4953static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4954{
4955 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4956 volatile u32 int_reg;
4957
4958 ENTER;
4959 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
4960 ipr_init_ioa_mem(ioa_cfg);
4961
4962 ioa_cfg->allow_interrupts = 1;
4963 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4964
4965 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4966 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4967 ioa_cfg->regs.clr_interrupt_mask_reg);
4968 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4969 return IPR_RC_JOB_CONTINUE;
4970 }
4971
4972 /* Enable destructive diagnostics on IOA */
4973 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4974
4975 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4976 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4977
4978 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
4979
4980 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4981 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
4982 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
4983 ipr_cmd->done = ipr_reset_ioa_job;
4984 add_timer(&ipr_cmd->timer);
4985 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4986
4987 LEAVE;
4988 return IPR_RC_JOB_RETURN;
4989}
4990
4991/**
4992 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4993 * @ipr_cmd: ipr command struct
4994 *
4995 * This function is invoked when an adapter dump has run out
4996 * of processing time.
4997 *
4998 * Return value:
4999 * IPR_RC_JOB_CONTINUE
5000 **/
5001static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5002{
5003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5004
5005 if (ioa_cfg->sdt_state == GET_DUMP)
5006 ioa_cfg->sdt_state = ABORT_DUMP;
5007
5008 ipr_cmd->job_step = ipr_reset_alert;
5009
5010 return IPR_RC_JOB_CONTINUE;
5011}
5012
5013/**
5014 * ipr_unit_check_no_data - Log a unit check/no data error log
5015 * @ioa_cfg: ioa config struct
5016 *
5017 * Logs an error indicating the adapter unit checked, but for some
5018 * reason, we were unable to fetch the unit check buffer.
5019 *
5020 * Return value:
5021 * nothing
5022 **/
5023static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5024{
5025 ioa_cfg->errors_logged++;
5026 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5027}
5028
5029/**
5030 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5031 * @ioa_cfg: ioa config struct
5032 *
5033 * Fetches the unit check buffer from the adapter by clocking the data
5034 * through the mailbox register.
5035 *
5036 * Return value:
5037 * nothing
5038 **/
5039static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5040{
5041 unsigned long mailbox;
5042 struct ipr_hostrcb *hostrcb;
5043 struct ipr_uc_sdt sdt;
5044 int rc, length;
5045
5046 mailbox = readl(ioa_cfg->ioa_mailbox);
5047
5048 if (!ipr_sdt_is_fmt2(mailbox)) {
5049 ipr_unit_check_no_data(ioa_cfg);
5050 return;
5051 }
5052
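	/*
	 * The mailbox register points at a format 2 smart dump table. Read
	 * the table header, then use its first entry to locate the unit
	 * check buffer and clock it out into a free hostrcb for logging.
	 */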
5053 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5054 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5055 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5056
5057 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5058 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5059 ipr_unit_check_no_data(ioa_cfg);
5060 return;
5061 }
5062
5063 /* Find length of the first sdt entry (UC buffer) */
5064 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5065 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5066
5067 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5068 struct ipr_hostrcb, queue);
5069 list_del(&hostrcb->queue);
5070 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5071
5072 rc = ipr_get_ldump_data_section(ioa_cfg,
5073 be32_to_cpu(sdt.entry[0].bar_str_offset),
5074 (__be32 *)&hostrcb->hcam,
5075 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5076
5077 if (!rc)
5078 ipr_handle_log_data(ioa_cfg, hostrcb);
5079 else
5080 ipr_unit_check_no_data(ioa_cfg);
5081
5082 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5083}
5084
5085/**
5086 * ipr_reset_restore_cfg_space - Restore PCI config space.
5087 * @ipr_cmd: ipr command struct
5088 *
5089 * Description: This function restores the saved PCI config space of
5090 * the adapter, fails all outstanding ops back to the callers, and
5091 * fetches the dump/unit check if applicable to this reset.
5092 *
5093 * Return value:
5094 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5095 **/
5096static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5097{
5098 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5099 int rc;
5100
5101 ENTER;
b30197d2 5102 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5103 rc = pci_restore_state(ioa_cfg->pdev);
5104
5105 if (rc != PCIBIOS_SUCCESSFUL) {
5106 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5107 return IPR_RC_JOB_CONTINUE;
5108 }
5109
5110 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5111 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5112 return IPR_RC_JOB_CONTINUE;
5113 }
5114
5115 ipr_fail_all_ops(ioa_cfg);
5116
5117 if (ioa_cfg->ioa_unit_checked) {
5118 ioa_cfg->ioa_unit_checked = 0;
5119 ipr_get_unit_check_buffer(ioa_cfg);
5120 ipr_cmd->job_step = ipr_reset_alert;
5121 ipr_reset_start_timer(ipr_cmd, 0);
5122 return IPR_RC_JOB_RETURN;
5123 }
5124
5125 if (ioa_cfg->in_ioa_bringdown) {
5126 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5127 } else {
5128 ipr_cmd->job_step = ipr_reset_enable_ioa;
5129
5130 if (GET_DUMP == ioa_cfg->sdt_state) {
5131 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5132 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5133 schedule_work(&ioa_cfg->work_q);
5134 return IPR_RC_JOB_RETURN;
5135 }
5136 }
5137
 5138	LEAVE;
5139 return IPR_RC_JOB_CONTINUE;
5140}
5141
5142/**
5143 * ipr_reset_start_bist - Run BIST on the adapter.
5144 * @ipr_cmd: ipr command struct
5145 *
5146 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5147 *
5148 * Return value:
5149 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5150 **/
5151static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5152{
5153 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5154 int rc;
5155
5156 ENTER;
b30197d2 5157 pci_block_user_cfg_access(ioa_cfg->pdev);
5158 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5159
5160 if (rc != PCIBIOS_SUCCESSFUL) {
5161 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5162 rc = IPR_RC_JOB_CONTINUE;
5163 } else {
5164 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5165 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5166 rc = IPR_RC_JOB_RETURN;
5167 }
5168
5169 LEAVE;
5170 return rc;
5171}
5172
5173/**
5174 * ipr_reset_allowed - Query whether or not IOA can be reset
5175 * @ioa_cfg: ioa config struct
5176 *
5177 * Return value:
5178 * 0 if reset not allowed / non-zero if reset is allowed
5179 **/
5180static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5181{
5182 volatile u32 temp_reg;
5183
5184 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5185 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5186}
5187
5188/**
5189 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5190 * @ipr_cmd: ipr command struct
5191 *
5192 * Description: This function waits for adapter permission to run BIST,
5193 * then runs BIST. If the adapter does not give permission after a
5194 * reasonable time, we will reset the adapter anyway. The impact of
 5195 * resetting the adapter without warning is the risk of losing its
 5196 * persistent error log. If the adapter is reset while it is writing
 5197 * to its flash, the flash
5198 * segment will have bad ECC and be zeroed.
5199 *
5200 * Return value:
5201 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5202 **/
5203static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5204{
5205 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5206 int rc = IPR_RC_JOB_RETURN;
5207
5208 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5209 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5210 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5211 } else {
5212 ipr_cmd->job_step = ipr_reset_start_bist;
5213 rc = IPR_RC_JOB_CONTINUE;
5214 }
5215
5216 return rc;
5217}
5218
5219/**
5220 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5221 * @ipr_cmd: ipr command struct
5222 *
5223 * Description: This function alerts the adapter that it will be reset.
5224 * If memory space is not currently enabled, proceed directly
5225 * to running BIST on the adapter. The timer must always be started
5226 * so we guarantee we do not run BIST from ipr_isr.
5227 *
5228 * Return value:
5229 * IPR_RC_JOB_RETURN
5230 **/
5231static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5232{
5233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5234 u16 cmd_reg;
5235 int rc;
5236
5237 ENTER;
5238 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5239
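	/*
	 * Only write the reset alert doorbell if PCI memory space is
	 * enabled, since the MMIO write cannot reach the adapter otherwise.
	 * The timer is started in either case so BIST is never run from
	 * interrupt context.
	 */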
5240 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5241 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5242 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5243 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5244 } else {
5245 ipr_cmd->job_step = ipr_reset_start_bist;
5246 }
5247
5248 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5249 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5250
5251 LEAVE;
5252 return IPR_RC_JOB_RETURN;
5253}
5254
5255/**
5256 * ipr_reset_ucode_download_done - Microcode download completion
5257 * @ipr_cmd: ipr command struct
5258 *
5259 * Description: This function unmaps the microcode download buffer.
5260 *
5261 * Return value:
5262 * IPR_RC_JOB_CONTINUE
5263 **/
5264static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5265{
5266 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5267 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5268
5269 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5270 sglist->num_sg, DMA_TO_DEVICE);
5271
5272 ipr_cmd->job_step = ipr_reset_alert;
5273 return IPR_RC_JOB_CONTINUE;
5274}
5275
5276/**
5277 * ipr_reset_ucode_download - Download microcode to the adapter
5278 * @ipr_cmd: ipr command struct
5279 *
 5280 * Description: This function checks to see if there is microcode
5281 * to download to the adapter. If there is, a download is performed.
5282 *
5283 * Return value:
5284 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5285 **/
5286static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5287{
5288 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5289 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5290
5291 ENTER;
5292 ipr_cmd->job_step = ipr_reset_alert;
5293
5294 if (!sglist)
5295 return IPR_RC_JOB_CONTINUE;
5296
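	/*
	 * Build a SCSI WRITE BUFFER in download-and-save mode with the
	 * microcode image length in CDB bytes 6-8, then chain to the
	 * download-done step which unmaps the buffer.
	 */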
5297 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5298 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5299 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5300 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5301 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5302 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5303 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5304
12baa420 5305 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5306 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5307
5308 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5309 IPR_WRITE_BUFFER_TIMEOUT);
5310
5311 LEAVE;
5312 return IPR_RC_JOB_RETURN;
5313}
5314
5315/**
5316 * ipr_reset_shutdown_ioa - Shutdown the adapter
5317 * @ipr_cmd: ipr command struct
5318 *
5319 * Description: This function issues an adapter shutdown of the
5320 * specified type to the specified adapter as part of the
5321 * adapter reset job.
5322 *
5323 * Return value:
5324 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5325 **/
5326static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5327{
5328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5329 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5330 unsigned long timeout;
5331 int rc = IPR_RC_JOB_CONTINUE;
5332
5333 ENTER;
5334 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5335 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5336 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5337 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5338 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5339
5340 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5341 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5342 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5343 timeout = IPR_INTERNAL_TIMEOUT;
5344 else
5345 timeout = IPR_SHUTDOWN_TIMEOUT;
5346
5347 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5348
5349 rc = IPR_RC_JOB_RETURN;
5350 ipr_cmd->job_step = ipr_reset_ucode_download;
5351 } else
5352 ipr_cmd->job_step = ipr_reset_alert;
5353
5354 LEAVE;
5355 return rc;
5356}
5357
5358/**
5359 * ipr_reset_ioa_job - Adapter reset job
5360 * @ipr_cmd: ipr command struct
5361 *
5362 * Description: This function is the job router for the adapter reset job.
5363 *
5364 * Return value:
5365 * none
5366 **/
5367static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5368{
5369 u32 rc, ioasc;
5370 unsigned long scratch = ipr_cmd->u.scratch;
5371 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5372
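	/*
	 * Run job steps back to back while they return IPR_RC_JOB_CONTINUE.
	 * A step that issues an asynchronous request returns
	 * IPR_RC_JOB_RETURN and this router is re-entered when that request
	 * completes.
	 */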
5373 do {
5374 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5375
5376 if (ioa_cfg->reset_cmd != ipr_cmd) {
5377 /*
5378 * We are doing nested adapter resets and this is
5379 * not the current reset job.
5380 */
5381 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5382 return;
5383 }
5384
5385 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5386 dev_err(&ioa_cfg->pdev->dev,
5387 "0x%02X failed with IOASC: 0x%08X\n",
5388 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5389
5390 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5391 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5392 return;
5393 }
5394
5395 ipr_reinit_ipr_cmnd(ipr_cmd);
5396 ipr_cmd->u.scratch = scratch;
5397 rc = ipr_cmd->job_step(ipr_cmd);
5398 } while(rc == IPR_RC_JOB_CONTINUE);
5399}
5400
5401/**
5402 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5403 * @ioa_cfg: ioa config struct
5404 * @job_step: first job step of reset job
5405 * @shutdown_type: shutdown type
5406 *
5407 * Description: This function will initiate the reset of the given adapter
5408 * starting at the selected job step.
5409 * If the caller needs to wait on the completion of the reset,
5410 * the caller must sleep on the reset_wait_q.
5411 *
5412 * Return value:
5413 * none
5414 **/
5415static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5416 int (*job_step) (struct ipr_cmnd *),
5417 enum ipr_shutdown_type shutdown_type)
5418{
5419 struct ipr_cmnd *ipr_cmd;
5420
5421 ioa_cfg->in_reset_reload = 1;
5422 ioa_cfg->allow_cmds = 0;
5423 scsi_block_requests(ioa_cfg->host);
5424
5425 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5426 ioa_cfg->reset_cmd = ipr_cmd;
5427 ipr_cmd->job_step = job_step;
5428 ipr_cmd->u.shutdown_type = shutdown_type;
5429
5430 ipr_reset_ioa_job(ipr_cmd);
5431}
5432
5433/**
5434 * ipr_initiate_ioa_reset - Initiate an adapter reset
5435 * @ioa_cfg: ioa config struct
5436 * @shutdown_type: shutdown type
5437 *
5438 * Description: This function will initiate the reset of the given adapter.
5439 * If the caller needs to wait on the completion of the reset,
5440 * the caller must sleep on the reset_wait_q.
5441 *
5442 * Return value:
5443 * none
5444 **/
5445static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5446 enum ipr_shutdown_type shutdown_type)
5447{
5448 if (ioa_cfg->ioa_is_dead)
5449 return;
5450
5451 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5452 ioa_cfg->sdt_state = ABORT_DUMP;
5453
5454 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5455 dev_err(&ioa_cfg->pdev->dev,
5456 "IOA taken offline - error recovery failed\n");
5457
5458 ioa_cfg->reset_retries = 0;
5459 ioa_cfg->ioa_is_dead = 1;
5460
5461 if (ioa_cfg->in_ioa_bringdown) {
5462 ioa_cfg->reset_cmd = NULL;
5463 ioa_cfg->in_reset_reload = 0;
5464 ipr_fail_all_ops(ioa_cfg);
5465 wake_up_all(&ioa_cfg->reset_wait_q);
5466
5467 spin_unlock_irq(ioa_cfg->host->host_lock);
5468 scsi_unblock_requests(ioa_cfg->host);
5469 spin_lock_irq(ioa_cfg->host->host_lock);
5470 return;
5471 } else {
5472 ioa_cfg->in_ioa_bringdown = 1;
5473 shutdown_type = IPR_SHUTDOWN_NONE;
5474 }
5475 }
5476
5477 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5478 shutdown_type);
5479}
5480
5481/**
5482 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5483 * @ioa_cfg: ioa cfg struct
5484 *
5485 * Description: This is the second phase of adapter initialization.
5486 * This function takes care of initializing the adapter to the point
5487 * where it can accept new commands.
5488 *
5489 * Return value:
5490 * 0 on success / -EIO on failure
5491 **/
5492static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5493{
5494 int rc = 0;
5495 unsigned long host_lock_flags = 0;
5496
5497 ENTER;
5498 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5499 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5500 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5501
5502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5503 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5504 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5505
5506 if (ioa_cfg->ioa_is_dead) {
5507 rc = -EIO;
5508 } else if (ipr_invalid_adapter(ioa_cfg)) {
5509 if (!ipr_testmode)
5510 rc = -EIO;
5511
5512 dev_err(&ioa_cfg->pdev->dev,
5513 "Adapter not supported in this hardware configuration.\n");
5514 }
5515
5516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5517
5518 LEAVE;
5519 return rc;
5520}
5521
5522/**
5523 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5524 * @ioa_cfg: ioa config struct
5525 *
5526 * Return value:
5527 * none
5528 **/
5529static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5530{
5531 int i;
5532
5533 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5534 if (ioa_cfg->ipr_cmnd_list[i])
5535 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5536 ioa_cfg->ipr_cmnd_list[i],
5537 ioa_cfg->ipr_cmnd_list_dma[i]);
5538
5539 ioa_cfg->ipr_cmnd_list[i] = NULL;
5540 }
5541
5542 if (ioa_cfg->ipr_cmd_pool)
5543 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5544
5545 ioa_cfg->ipr_cmd_pool = NULL;
5546}
5547
5548/**
5549 * ipr_free_mem - Frees memory allocated for an adapter
5550 * @ioa_cfg: ioa cfg struct
5551 *
5552 * Return value:
5553 * nothing
5554 **/
5555static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5556{
5557 int i;
5558
5559 kfree(ioa_cfg->res_entries);
5560 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5561 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5562 ipr_free_cmd_blks(ioa_cfg);
5563 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5564 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5565 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5566 ioa_cfg->cfg_table,
5567 ioa_cfg->cfg_table_dma);
5568
5569 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5570 pci_free_consistent(ioa_cfg->pdev,
5571 sizeof(struct ipr_hostrcb),
5572 ioa_cfg->hostrcb[i],
5573 ioa_cfg->hostrcb_dma[i]);
5574 }
5575
5576 ipr_free_dump(ioa_cfg);
5577 kfree(ioa_cfg->saved_mode_pages);
5578 kfree(ioa_cfg->trace);
5579}
5580
5581/**
5582 * ipr_free_all_resources - Free all allocated resources for an adapter.
5583 * @ioa_cfg: ioa config struct
5584 *
5585 * This function frees all allocated resources for the
5586 * specified adapter.
5587 *
5588 * Return value:
5589 * none
5590 **/
5591static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5592{
5593 struct pci_dev *pdev = ioa_cfg->pdev;
5594
5595 ENTER;
5596 free_irq(pdev->irq, ioa_cfg);
5597 iounmap(ioa_cfg->hdw_dma_regs);
5598 pci_release_regions(pdev);
5599 ipr_free_mem(ioa_cfg);
5600 scsi_host_put(ioa_cfg->host);
5601 pci_disable_device(pdev);
5602 LEAVE;
5603}
5604
5605/**
5606 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5607 * @ioa_cfg: ioa config struct
5608 *
5609 * Return value:
5610 * 0 on success / -ENOMEM on allocation failure
5611 **/
5612static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5613{
5614 struct ipr_cmnd *ipr_cmd;
5615 struct ipr_ioarcb *ioarcb;
5616 dma_addr_t dma_addr;
5617 int i;
5618
5619 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5620 sizeof(struct ipr_cmnd), 8, 0);
5621
5622 if (!ioa_cfg->ipr_cmd_pool)
5623 return -ENOMEM;
5624
5625 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5626 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5627
5628 if (!ipr_cmd) {
5629 ipr_free_cmd_blks(ioa_cfg);
5630 return -ENOMEM;
5631 }
5632
5633 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5634 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5635 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5636
5637 ioarcb = &ipr_cmd->ioarcb;
5638 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5639 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5640 ioarcb->write_ioadl_addr =
5641 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5642 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5643 ioarcb->ioasa_host_pci_addr =
5644 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5645 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5646 ipr_cmd->cmd_index = i;
5647 ipr_cmd->ioa_cfg = ioa_cfg;
5648 ipr_cmd->sense_buffer_dma = dma_addr +
5649 offsetof(struct ipr_cmnd, sense_buffer);
5650
5651 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5652 }
5653
5654 return 0;
5655}
5656
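/*
 * Illustrative sketch, not part of the driver: host_response_handle is the
 * command index shifted left by two, so when the adapter posts a handle to
 * the host RRQ the completion path can recover the ipr_cmnd with a mask, a
 * shift and an array lookup. The mask/shift constant names follow ipr.h,
 * but the lookup helper itself is hypothetical.
 */
#if 0
static struct ipr_cmnd *ipr_example_lookup_cmnd(struct ipr_ioa_cfg *ioa_cfg,
						__be32 rrq_entry)
{
	u32 cmd_index = (be32_to_cpu(rrq_entry) & IPR_HRRQ_REQ_RESP_HANDLE_MASK)
			>> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

	return ioa_cfg->ipr_cmnd_list[cmd_index];
}
#endif
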
5657/**
5658 * ipr_alloc_mem - Allocate memory for an adapter
5659 * @ioa_cfg: ioa config struct
5660 *
5661 * Return value:
5662 * 0 on success / non-zero for error
5663 **/
5664static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5665{
5666 struct pci_dev *pdev = ioa_cfg->pdev;
5667 int i, rc = -ENOMEM;
5668
5669 ENTER;
0bc42e35 5670 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
5671 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5672
5673 if (!ioa_cfg->res_entries)
5674 goto out;
5675
5676 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5677 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5678
5679 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5680 sizeof(struct ipr_misc_cbs),
5681 &ioa_cfg->vpd_cbs_dma);
5682
5683 if (!ioa_cfg->vpd_cbs)
5684 goto out_free_res_entries;
5685
5686 if (ipr_alloc_cmd_blks(ioa_cfg))
5687 goto out_free_vpd_cbs;
5688
5689 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5690 sizeof(u32) * IPR_NUM_CMD_BLKS,
5691 &ioa_cfg->host_rrq_dma);
5692
5693 if (!ioa_cfg->host_rrq)
5694 goto out_ipr_free_cmd_blocks;
5695
5696 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5697 sizeof(struct ipr_config_table),
5698 &ioa_cfg->cfg_table_dma);
5699
5700 if (!ioa_cfg->cfg_table)
5701 goto out_free_host_rrq;
5702
5703 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5704 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5705 sizeof(struct ipr_hostrcb),
5706 &ioa_cfg->hostrcb_dma[i]);
5707
5708 if (!ioa_cfg->hostrcb[i])
5709 goto out_free_hostrcb_dma;
5710
5711 ioa_cfg->hostrcb[i]->hostrcb_dma =
5712 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5713 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5714 }
5715
0bc42e35 5716 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
5717 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5718
5719 if (!ioa_cfg->trace)
5720 goto out_free_hostrcb_dma;
5721
5722 rc = 0;
5723out:
5724 LEAVE;
5725 return rc;
5726
5727out_free_hostrcb_dma:
5728 while (i-- > 0) {
5729 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5730 ioa_cfg->hostrcb[i],
5731 ioa_cfg->hostrcb_dma[i]);
5732 }
5733 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5734 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5735out_free_host_rrq:
5736 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5737 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5738out_ipr_free_cmd_blocks:
5739 ipr_free_cmd_blks(ioa_cfg);
5740out_free_vpd_cbs:
5741 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5742 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5743out_free_res_entries:
5744 kfree(ioa_cfg->res_entries);
5745 goto out;
5746}
5747
5748/**
5749 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5750 * @ioa_cfg: ioa config struct
5751 *
5752 * Return value:
5753 * none
5754 **/
5755static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5756{
5757 int i;
5758
5759 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5760 ioa_cfg->bus_attr[i].bus = i;
5761 ioa_cfg->bus_attr[i].qas_enabled = 0;
5762 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
5763 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5764 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5765 else
5766 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5767 }
5768}
5769
5770/**
5771 * ipr_init_ioa_cfg - Initialize IOA config struct
5772 * @ioa_cfg: ioa config struct
5773 * @host: scsi host struct
5774 * @pdev: PCI dev struct
5775 *
5776 * Return value:
5777 * none
5778 **/
5779static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5780 struct Scsi_Host *host, struct pci_dev *pdev)
5781{
5782 const struct ipr_interrupt_offsets *p;
5783 struct ipr_interrupts *t;
5784 void __iomem *base;
5785
5786 ioa_cfg->host = host;
5787 ioa_cfg->pdev = pdev;
5788 ioa_cfg->log_level = ipr_log_level;
5789 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5790 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5791 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5792 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5793 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5794 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5795 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5796 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5797
5798 INIT_LIST_HEAD(&ioa_cfg->free_q);
5799 INIT_LIST_HEAD(&ioa_cfg->pending_q);
5800 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5801 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5802 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5803 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5804 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5805 init_waitqueue_head(&ioa_cfg->reset_wait_q);
5806 ioa_cfg->sdt_state = INACTIVE;
5807 if (ipr_enable_cache)
5808 ioa_cfg->cache_state = CACHE_ENABLED;
5809 else
5810 ioa_cfg->cache_state = CACHE_DISABLED;
5811
5812 ipr_initialize_bus_attr(ioa_cfg);
5813
5814 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5815 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5816 host->max_channel = IPR_MAX_BUS_TO_SCAN;
5817 host->unique_id = host->host_no;
5818 host->max_cmd_len = IPR_MAX_CDB_LEN;
5819 pci_set_drvdata(pdev, ioa_cfg);
5820
5821 p = &ioa_cfg->chip_cfg->regs;
5822 t = &ioa_cfg->regs;
5823 base = ioa_cfg->hdw_dma_regs;
5824
5825 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5826 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5827 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5828 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5829 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5830 t->ioarrin_reg = base + p->ioarrin_reg;
5831 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5832 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5833 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5834}
5835
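/*
 * Illustrative sketch, not part of the driver: once the chip-specific
 * register offsets above have been added to the ioremapped base, the rest
 * of the driver accesses the adapter with plain readl()/writel() on those
 * pointers. Masking all adapter interrupts looks roughly like this (the
 * helper name is hypothetical; ipr_mask_and_clear_interrupts() does the
 * real work in this driver):
 */
#if 0
static void ipr_example_mask_interrupts(struct ipr_ioa_cfg *ioa_cfg)
{
	/* Set every interrupt mask bit, then read back to post the write. */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	readl(ioa_cfg->regs.sense_interrupt_mask_reg);
}
#endif
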
5836/**
5837 * ipr_get_chip_cfg - Find adapter chip configuration
5838 * @dev_id: PCI device id struct
5839 *
5840 * Return value:
5841 * ptr to chip config on success / NULL on failure
5842 **/
5843static const struct ipr_chip_cfg_t * __devinit
5844ipr_get_chip_cfg(const struct pci_device_id *dev_id)
5845{
5846 int i;
5847
5848 if (dev_id->driver_data)
5849 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5850
5851 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
5852 if (ipr_chip[i].vendor == dev_id->vendor &&
5853 ipr_chip[i].device == dev_id->device)
5854 return ipr_chip[i].cfg;
5855 return NULL;
5856}
5857
5858/**
5859 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5860 * @pdev: PCI device struct
5861 * @dev_id: PCI device id struct
5862 *
5863 * Return value:
5864 * 0 on success / non-zero on failure
5865 **/
5866static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5867 const struct pci_device_id *dev_id)
5868{
5869 struct ipr_ioa_cfg *ioa_cfg;
5870 struct Scsi_Host *host;
5871 unsigned long ipr_regs_pci;
5872 void __iomem *ipr_regs;
5873 u32 rc = PCIBIOS_SUCCESSFUL;
5874
5875 ENTER;
5876
5877 if ((rc = pci_enable_device(pdev))) {
5878 dev_err(&pdev->dev, "Cannot enable adapter\n");
5879 goto out;
5880 }
5881
5882 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
5883
5884 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5885
5886 if (!host) {
5887 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5888 rc = -ENOMEM;
5889 goto out_disable;
5890 }
5891
5892 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5893 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
5894
5895 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
5896
5897 if (!ioa_cfg->chip_cfg) {
5898 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
5899 dev_id->vendor, dev_id->device);
5900 goto out_scsi_host_put;
5901 }
5902
5903 ipr_regs_pci = pci_resource_start(pdev, 0);
5904
5905 rc = pci_request_regions(pdev, IPR_NAME);
5906 if (rc < 0) {
5907 dev_err(&pdev->dev,
5908 "Couldn't register memory range of registers\n");
5909 goto out_scsi_host_put;
5910 }
5911
5912 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
5913
5914 if (!ipr_regs) {
5915 dev_err(&pdev->dev,
5916 "Couldn't map memory range of registers\n");
5917 rc = -ENOMEM;
5918 goto out_release_regions;
5919 }
5920
5921 ioa_cfg->hdw_dma_regs = ipr_regs;
5922 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5923 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5924
5925 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5926
5927 pci_set_master(pdev);
5928
5929 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5930 if (rc < 0) {
5931 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5932 goto cleanup_nomem;
5933 }
5934
5935 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5936 ioa_cfg->chip_cfg->cache_line_size);
5937
5938 if (rc != PCIBIOS_SUCCESSFUL) {
5939 dev_err(&pdev->dev, "Write of cache line size failed\n");
5940 rc = -EIO;
5941 goto cleanup_nomem;
5942 }
5943
5944 /* Save away PCI config space for use following IOA reset */
5945 rc = pci_save_state(pdev);
5946
5947 if (rc != PCIBIOS_SUCCESSFUL) {
5948 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5949 rc = -EIO;
5950 goto cleanup_nomem;
5951 }
5952
5953 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5954 goto cleanup_nomem;
5955
5956 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5957 goto cleanup_nomem;
5958
5959 rc = ipr_alloc_mem(ioa_cfg);
5960 if (rc < 0) {
5961 dev_err(&pdev->dev,
5962 "Couldn't allocate enough memory for device driver!\n");
5963 goto cleanup_nomem;
5964 }
5965
5966 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5967 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5968
5969 if (rc) {
5970 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
5971 pdev->irq, rc);
5972 goto cleanup_nolog;
5973 }
5974
5975 spin_lock(&ipr_driver_lock);
5976 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5977 spin_unlock(&ipr_driver_lock);
5978
5979 LEAVE;
5980out:
5981 return rc;
5982
5983cleanup_nolog:
5984 ipr_free_mem(ioa_cfg);
5985cleanup_nomem:
5986 iounmap(ipr_regs);
5987out_release_regions:
5988 pci_release_regions(pdev);
5989out_scsi_host_put:
5990 scsi_host_put(host);
5991out_disable:
5992 pci_disable_device(pdev);
5993 goto out;
5994}
5995
5996/**
5997 * ipr_scan_vsets - Scans for VSET devices
5998 * @ioa_cfg: ioa config struct
5999 *
6000 * Description: Since the VSET resources do not follow SAM in that we can have
6001 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6002 *
6003 * Return value:
6004 * none
6005 **/
6006static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6007{
6008 int target, lun;
6009
6010 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6011 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6012 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6013}
6014
6015/**
6016 * ipr_initiate_ioa_bringdown - Bring down an adapter
6017 * @ioa_cfg: ioa config struct
6018 * @shutdown_type: shutdown type
6019 *
6020 * Description: This function will initiate bringing down the adapter.
6021 * This consists of issuing an IOA shutdown to the adapter
6022 * to flush the cache, and running BIST.
6023 * If the caller needs to wait on the completion of the reset,
6024 * the caller must sleep on the reset_wait_q.
6025 *
6026 * Return value:
6027 * none
6028 **/
6029static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6030 enum ipr_shutdown_type shutdown_type)
6031{
6032 ENTER;
6033 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6034 ioa_cfg->sdt_state = ABORT_DUMP;
6035 ioa_cfg->reset_retries = 0;
6036 ioa_cfg->in_ioa_bringdown = 1;
6037 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6038 LEAVE;
6039}
6040
6041/**
6042 * __ipr_remove - Remove a single adapter
6043 * @pdev: pci device struct
6044 *
6045 * Adapter hot plug remove entry point.
6046 *
6047 * Return value:
6048 * none
6049 **/
6050static void __ipr_remove(struct pci_dev *pdev)
6051{
6052 unsigned long host_lock_flags = 0;
6053 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6054 ENTER;
6055
6056 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6057 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6058
6059 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6060 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 6061 flush_scheduled_work();
6062 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6063
6064 spin_lock(&ipr_driver_lock);
6065 list_del(&ioa_cfg->queue);
6066 spin_unlock(&ipr_driver_lock);
6067
6068 if (ioa_cfg->sdt_state == ABORT_DUMP)
6069 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6070 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6071
6072 ipr_free_all_resources(ioa_cfg);
6073
6074 LEAVE;
6075}
6076
6077/**
6078 * ipr_remove - IOA hot plug remove entry point
6079 * @pdev: pci device struct
6080 *
6081 * Adapter hot plug remove entry point.
6082 *
6083 * Return value:
6084 * none
6085 **/
6086static void ipr_remove(struct pci_dev *pdev)
6087{
6088 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6089
6090 ENTER;
6091
6092 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6093 &ipr_trace_attr);
6094 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6095 &ipr_dump_attr);
6096 scsi_remove_host(ioa_cfg->host);
6097
6098 __ipr_remove(pdev);
6099
6100 LEAVE;
6101}
6102
6103/**
6104 * ipr_probe - Adapter hot plug add entry point
6105 *
6106 * Return value:
6107 * 0 on success / non-zero on failure
6108 **/
6109static int __devinit ipr_probe(struct pci_dev *pdev,
6110 const struct pci_device_id *dev_id)
6111{
6112 struct ipr_ioa_cfg *ioa_cfg;
6113 int rc;
6114
6115 rc = ipr_probe_ioa(pdev, dev_id);
6116
6117 if (rc)
6118 return rc;
6119
6120 ioa_cfg = pci_get_drvdata(pdev);
6121 rc = ipr_probe_ioa_part2(ioa_cfg);
6122
6123 if (rc) {
6124 __ipr_remove(pdev);
6125 return rc;
6126 }
6127
6128 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6129
6130 if (rc) {
6131 __ipr_remove(pdev);
6132 return rc;
6133 }
6134
6135 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6136 &ipr_trace_attr);
6137
6138 if (rc) {
6139 scsi_remove_host(ioa_cfg->host);
6140 __ipr_remove(pdev);
6141 return rc;
6142 }
6143
6144 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6145 &ipr_dump_attr);
6146
6147 if (rc) {
6148 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6149 &ipr_trace_attr);
6150 scsi_remove_host(ioa_cfg->host);
6151 __ipr_remove(pdev);
6152 return rc;
6153 }
6154
6155 scsi_scan_host(ioa_cfg->host);
6156 ipr_scan_vsets(ioa_cfg);
6157 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6158 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 6159 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6160 schedule_work(&ioa_cfg->work_q);
6161 return 0;
6162}
6163
6164/**
6165 * ipr_shutdown - Shutdown handler.
d18c3db5 6166 * @pdev: pci device struct
6167 *
6168 * This function is invoked upon system shutdown/reboot. It will issue
6169 * an adapter shutdown to the adapter to flush the write cache.
6170 *
6171 * Return value:
6172 * none
6173 **/
d18c3db5 6174static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 6175{
d18c3db5 6176 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6177 unsigned long lock_flags = 0;
6178
6179 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6180 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6182 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6183}
6184
6185static struct pci_device_id ipr_pci_table[] __devinitdata = {
6186 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6187 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6188 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6190 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6191 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6192 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6193 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6194 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6195 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6196 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6197 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6199 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6200 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6201 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6202 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6203 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6204 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6205 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6206 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6207 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6208 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6209 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6210 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6211 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6212 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6213 { }
6214};
6215MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6216
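/*
 * Illustrative sketch, not part of the driver: support for an additional
 * adapter variant would be one more ipr_pci_table entry before the
 * terminating { }, with driver_data pointing at the matching chip config
 * so ipr_get_chip_cfg() can use it directly instead of searching ipr_chip.
 * The subsystem ID name below is hypothetical.
 */
#if 0
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_EXAMPLE,
	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
#endif
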
6217static struct pci_driver ipr_driver = {
6218 .name = IPR_NAME,
6219 .id_table = ipr_pci_table,
6220 .probe = ipr_probe,
6221 .remove = ipr_remove,
d18c3db5 6222 .shutdown = ipr_shutdown,
6223};
6224
6225/**
6226 * ipr_init - Module entry point
6227 *
6228 * Return value:
6229 * 0 on success / negative value on failure
6230 **/
6231static int __init ipr_init(void)
6232{
6233 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6234 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6235
6236 return pci_module_init(&ipr_driver);
6237}
6238
6239/**
6240 * ipr_exit - Module unload
6241 *
6242 * Module unload entry point.
6243 *
6244 * Return value:
6245 * none
6246 **/
6247static void __exit ipr_exit(void)
6248{
6249 pci_unregister_driver(&ipr_driver);
6250}
6251
6252module_init(ipr_init);
6253module_exit(ipr_exit);