1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57#include <linux/config.h>
58#include <linux/fs.h>
59#include <linux/init.h>
60#include <linux/types.h>
61#include <linux/errno.h>
62#include <linux/kernel.h>
63#include <linux/ioport.h>
64#include <linux/delay.h>
65#include <linux/pci.h>
66#include <linux/wait.h>
67#include <linux/spinlock.h>
68#include <linux/sched.h>
69#include <linux/interrupt.h>
70#include <linux/blkdev.h>
71#include <linux/firmware.h>
72#include <linux/module.h>
73#include <linux/moduleparam.h>
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/processor.h>
77#include <scsi/scsi.h>
78#include <scsi/scsi_host.h>
79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h>
82#include <scsi/scsi_request.h>
83#include "ipr.h"
84
85/*
86 * Global Data
87 */
88static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
89static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90static unsigned int ipr_max_speed = 1;
91static int ipr_testmode = 0;
92static unsigned int ipr_fastfail = 0;
93static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
94static unsigned int ipr_enable_cache = 1;
95static DEFINE_SPINLOCK(ipr_driver_lock);
96
97/* This table describes the differences between DMA controller chips */
98static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
99 { /* Gemstone and Citrine */
100 .mailbox = 0x0042C,
101 .cache_line_size = 0x20,
102 {
103 .set_interrupt_mask_reg = 0x0022C,
104 .clr_interrupt_mask_reg = 0x00230,
105 .sense_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_reg = 0x00228,
107 .sense_interrupt_reg = 0x00224,
108 .ioarrin_reg = 0x00404,
109 .sense_uproc_interrupt_reg = 0x00214,
110 .set_uproc_interrupt_reg = 0x00214,
111 .clr_uproc_interrupt_reg = 0x00218
112 }
113 },
114 { /* Snipe and Scamp */
115 .mailbox = 0x0052C,
116 .cache_line_size = 0x20,
117 {
118 .set_interrupt_mask_reg = 0x00288,
119 .clr_interrupt_mask_reg = 0x0028C,
120 .sense_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_reg = 0x00284,
122 .sense_interrupt_reg = 0x00280,
123 .ioarrin_reg = 0x00504,
124 .sense_uproc_interrupt_reg = 0x00290,
125 .set_uproc_interrupt_reg = 0x00290,
126 .clr_uproc_interrupt_reg = 0x00294
127 }
128 },
129};
130
131static const struct ipr_chip_t ipr_chip[] = {
132 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
134 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
135 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
136};
137
138static int ipr_max_bus_speeds [] = {
139 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
140};
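/*
 * Note: the max_speed module parameter described below selects an entry
 * from this table (0 = IPR_80MBs_SCSI_RATE, 1 = IPR_U160_SCSI_RATE,
 * 2 = IPR_U320_SCSI_RATE) to cap the negotiated bus speed.
 */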
141
142MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
143MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
144module_param_named(max_speed, ipr_max_speed, uint, 0);
145MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
146module_param_named(log_level, ipr_log_level, uint, 0);
147MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
148module_param_named(testmode, ipr_testmode, int, 0);
149MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
150module_param_named(fastfail, ipr_fastfail, int, 0);
151MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
152module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
153MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
154module_param_named(enable_cache, ipr_enable_cache, int, 0);
155MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
156MODULE_LICENSE("GPL");
157MODULE_VERSION(IPR_DRIVER_VERSION);
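/*
 * Example (illustrative only) of loading the driver with a reduced maximum
 * bus speed and the most verbose error logging:
 *
 *	modprobe ipr max_speed=0 log_level=4
 */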
158
159static const char *ipr_gpdd_dev_end_states[] = {
160 "Command complete",
161 "Terminated by host",
162 "Terminated by device reset",
163 "Terminated by bus reset",
164 "Unknown",
165 "Command not started"
166};
167
168static const char *ipr_gpdd_dev_bus_phases[] = {
169 "Bus free",
170 "Arbitration",
171 "Selection",
172 "Message out",
173 "Command",
174 "Message in",
175 "Data out",
176 "Data in",
177 "Status",
178 "Reselection",
179 "Unknown"
180};
181
182/* A constant array of IOASCs/URCs/Error Messages */
183static const
184struct ipr_error_table_t ipr_error_table[] = {
185 {0x00000000, 1, 1,
186 "8155: An unknown error was received"},
187 {0x00330000, 0, 0,
188 "Soft underlength error"},
189 {0x005A0000, 0, 0,
190 "Command to be cancelled not found"},
191 {0x00808000, 0, 0,
192 "Qualified success"},
193 {0x01080000, 1, 1,
194 "FFFE: Soft device bus error recovered by the IOA"},
195 {0x01170600, 0, 1,
196 "FFF9: Device sector reassign successful"},
197 {0x01170900, 0, 1,
198 "FFF7: Media error recovered by device rewrite procedures"},
199 {0x01180200, 0, 1,
200 "7001: IOA sector reassignment successful"},
201 {0x01180500, 0, 1,
202 "FFF9: Soft media error. Sector reassignment recommended"},
203 {0x01180600, 0, 1,
204 "FFF7: Media error recovered by IOA rewrite procedures"},
205 {0x01418000, 0, 1,
206 "FF3D: Soft PCI bus error recovered by the IOA"},
207 {0x01440000, 1, 1,
208 "FFF6: Device hardware error recovered by the IOA"},
209 {0x01448100, 0, 1,
210 "FFF6: Device hardware error recovered by the device"},
211 {0x01448200, 1, 1,
212 "FF3D: Soft IOA error recovered by the IOA"},
213 {0x01448300, 0, 1,
214 "FFFA: Undefined device response recovered by the IOA"},
215 {0x014A0000, 1, 1,
216 "FFF6: Device bus error, message or command phase"},
217 {0x015D0000, 0, 1,
218 "FFF6: Failure prediction threshold exceeded"},
219 {0x015D9200, 0, 1,
220 "8009: Impending cache battery pack failure"},
221 {0x02040400, 0, 0,
222 "34FF: Disk device format in progress"},
223 {0x023F0000, 0, 0,
224 "Synchronization required"},
225 {0x024E0000, 0, 0,
226 "No ready, IOA shutdown"},
227 {0x025A0000, 0, 0,
228 "Not ready, IOA has been shutdown"},
229 {0x02670100, 0, 1,
230 "3020: Storage subsystem configuration error"},
231 {0x03110B00, 0, 0,
232 "FFF5: Medium error, data unreadable, recommend reassign"},
233 {0x03110C00, 0, 0,
234 "7000: Medium error, data unreadable, do not reassign"},
235 {0x03310000, 0, 1,
236 "FFF3: Disk media format bad"},
237 {0x04050000, 0, 1,
238 "3002: Addressed device failed to respond to selection"},
239 {0x04080000, 1, 1,
240 "3100: Device bus error"},
241 {0x04080100, 0, 1,
242 "3109: IOA timed out a device command"},
243 {0x04088000, 0, 0,
244 "3120: SCSI bus is not operational"},
245 {0x04118000, 0, 1,
246 "9000: IOA reserved area data check"},
247 {0x04118100, 0, 1,
248 "9001: IOA reserved area invalid data pattern"},
249 {0x04118200, 0, 1,
250 "9002: IOA reserved area LRC error"},
251 {0x04320000, 0, 1,
252 "102E: Out of alternate sectors for disk storage"},
253 {0x04330000, 1, 1,
254 "FFF4: Data transfer underlength error"},
255 {0x04338000, 1, 1,
256 "FFF4: Data transfer overlength error"},
257 {0x043E0100, 0, 1,
258 "3400: Logical unit failure"},
259 {0x04408500, 0, 1,
260 "FFF4: Device microcode is corrupt"},
261 {0x04418000, 1, 1,
262 "8150: PCI bus error"},
263 {0x04430000, 1, 0,
264 "Unsupported device bus message received"},
265 {0x04440000, 1, 1,
266 "FFF4: Disk device problem"},
267 {0x04448200, 1, 1,
268 "8150: Permanent IOA failure"},
269 {0x04448300, 0, 1,
270 "3010: Disk device returned wrong response to IOA"},
271 {0x04448400, 0, 1,
272 "8151: IOA microcode error"},
273 {0x04448500, 0, 0,
274 "Device bus status error"},
275 {0x04448600, 0, 1,
276 "8157: IOA error requiring IOA reset to recover"},
277 {0x04490000, 0, 0,
278 "Message reject received from the device"},
279 {0x04449200, 0, 1,
280 "8008: A permanent cache battery pack failure occurred"},
281 {0x0444A000, 0, 1,
282 "9090: Disk unit has been modified after the last known status"},
283 {0x0444A200, 0, 1,
284 "9081: IOA detected device error"},
285 {0x0444A300, 0, 1,
286 "9082: IOA detected device error"},
287 {0x044A0000, 1, 1,
288 "3110: Device bus error, message or command phase"},
289 {0x04670400, 0, 1,
290 "9091: Incorrect hardware configuration change has been detected"},
291 {0x046E0000, 0, 1,
292 "FFF4: Command to logical unit failed"},
293 {0x05240000, 1, 0,
294 "Illegal request, invalid request type or request packet"},
295 {0x05250000, 0, 0,
296 "Illegal request, invalid resource handle"},
297 {0x05260000, 0, 0,
298 "Illegal request, invalid field in parameter list"},
299 {0x05260100, 0, 0,
300 "Illegal request, parameter not supported"},
301 {0x05260200, 0, 0,
302 "Illegal request, parameter value invalid"},
303 {0x052C0000, 0, 0,
304 "Illegal request, command sequence error"},
305 {0x06040500, 0, 1,
306 "9031: Array protection temporarily suspended, protection resuming"},
307 {0x06040600, 0, 1,
308 "9040: Array protection temporarily suspended, protection resuming"},
309 {0x06290000, 0, 1,
310 "FFFB: SCSI bus was reset"},
311 {0x06290500, 0, 0,
312 "FFFE: SCSI bus transition to single ended"},
313 {0x06290600, 0, 0,
314 "FFFE: SCSI bus transition to LVD"},
315 {0x06298000, 0, 1,
316 "FFFB: SCSI bus was reset by another initiator"},
317 {0x063F0300, 0, 1,
318 "3029: A device replacement has occurred"},
319 {0x064C8000, 0, 1,
320 "9051: IOA cache data exists for a missing or failed device"},
321 {0x06670100, 0, 1,
322 "9025: Disk unit is not supported at its physical location"},
323 {0x06670600, 0, 1,
324 "3020: IOA detected a SCSI bus configuration error"},
325 {0x06678000, 0, 1,
326 "3150: SCSI bus configuration error"},
327 {0x06690200, 0, 1,
328 "9041: Array protection temporarily suspended"},
329 {0x06698200, 0, 1,
330 "9042: Corrupt array parity detected on specified device"},
331 {0x066B0200, 0, 1,
332 "9030: Array no longer protected due to missing or failed disk unit"},
333 {0x066B8200, 0, 1,
334 "9032: Array exposed but still protected"},
335 {0x07270000, 0, 0,
336 "Failure due to other device"},
337 {0x07278000, 0, 1,
338 "9008: IOA does not support functions expected by devices"},
339 {0x07278100, 0, 1,
340 "9010: Cache data associated with attached devices cannot be found"},
341 {0x07278200, 0, 1,
342 "9011: Cache data belongs to devices other than those attached"},
343 {0x07278400, 0, 1,
344 "9020: Array missing 2 or more devices with only 1 device present"},
345 {0x07278500, 0, 1,
346 "9021: Array missing 2 or more devices with 2 or more devices present"},
347 {0x07278600, 0, 1,
348 "9022: Exposed array is missing a required device"},
349 {0x07278700, 0, 1,
350 "9023: Array member(s) not at required physical locations"},
351 {0x07278800, 0, 1,
352 "9024: Array not functional due to present hardware configuration"},
353 {0x07278900, 0, 1,
354 "9026: Array not functional due to present hardware configuration"},
355 {0x07278A00, 0, 1,
356 "9027: Array is missing a device and parity is out of sync"},
357 {0x07278B00, 0, 1,
358 "9028: Maximum number of arrays already exist"},
359 {0x07278C00, 0, 1,
360 "9050: Required cache data cannot be located for a disk unit"},
361 {0x07278D00, 0, 1,
362 "9052: Cache data exists for a device that has been modified"},
363 {0x07278F00, 0, 1,
364 "9054: IOA resources not available due to previous problems"},
365 {0x07279100, 0, 1,
366 "9092: Disk unit requires initialization before use"},
367 {0x07279200, 0, 1,
368 "9029: Incorrect hardware configuration change has been detected"},
369 {0x07279600, 0, 1,
370 "9060: One or more disk pairs are missing from an array"},
371 {0x07279700, 0, 1,
372 "9061: One or more disks are missing from an array"},
373 {0x07279800, 0, 1,
374 "9062: One or more disks are missing from an array"},
375 {0x07279900, 0, 1,
376 "9063: Maximum number of functional arrays has been exceeded"},
377 {0x0B260000, 0, 0,
378 "Aborted command, invalid descriptor"},
379 {0x0B5A0000, 0, 0,
380 "Command terminated by host"}
381};
382
383static const struct ipr_ses_table_entry ipr_ses_table[] = {
384 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
385 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
386 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
387 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
388 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
389 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
390 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
391 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
392 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
393 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
394 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
395 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
396 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
397};
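/*
 * In the table above, compare_product_id_byte is a per-byte mask for the
 * product ID comparison: an 'X' means the corresponding byte must match,
 * while any other character (e.g. the '*' in "XXXXXXX*XXXXXXXX") means the
 * byte is ignored. ipr_find_ses_entry() below applies this mask.
 */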
398
399/*
400 * Function Prototypes
401 */
402static int ipr_reset_alert(struct ipr_cmnd *);
403static void ipr_process_ccn(struct ipr_cmnd *);
404static void ipr_process_error(struct ipr_cmnd *);
405static void ipr_reset_ioa_job(struct ipr_cmnd *);
406static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
407 enum ipr_shutdown_type);
408
409#ifdef CONFIG_SCSI_IPR_TRACE
410/**
411 * ipr_trc_hook - Add a trace entry to the driver trace
412 * @ipr_cmd: ipr command struct
413 * @type: trace type
414 * @add_data: additional data
415 *
416 * Return value:
417 * none
418 **/
419static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
420 u8 type, u32 add_data)
421{
422 struct ipr_trace_entry *trace_entry;
423 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
424
425 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
426 trace_entry->time = jiffies;
427 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
428 trace_entry->type = type;
429 trace_entry->cmd_index = ipr_cmd->cmd_index;
430 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
431 trace_entry->u.add_data = add_data;
432}
433#else
434#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
435#endif
436
437/**
438 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
439 * @ipr_cmd: ipr command struct
440 *
441 * Return value:
442 * none
443 **/
444static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
445{
446 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
447 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
448
449 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
450 ioarcb->write_data_transfer_length = 0;
451 ioarcb->read_data_transfer_length = 0;
452 ioarcb->write_ioadl_len = 0;
453 ioarcb->read_ioadl_len = 0;
454 ioasa->ioasc = 0;
455 ioasa->residual_data_len = 0;
456
457 ipr_cmd->scsi_cmd = NULL;
458 ipr_cmd->sense_buffer[0] = 0;
459 ipr_cmd->dma_use_sg = 0;
460}
461
462/**
463 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
464 * @ipr_cmd: ipr command struct
465 *
466 * Return value:
467 * none
468 **/
469static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
470{
471 ipr_reinit_ipr_cmnd(ipr_cmd);
472 ipr_cmd->u.scratch = 0;
473 ipr_cmd->sibling = NULL;
474 init_timer(&ipr_cmd->timer);
475}
476
477/**
478 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
479 * @ioa_cfg: ioa config struct
480 *
481 * Return value:
482 * pointer to ipr command struct
483 **/
484static
485struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
486{
487 struct ipr_cmnd *ipr_cmd;
488
489 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
490 list_del(&ipr_cmd->queue);
491 ipr_init_ipr_cmnd(ipr_cmd);
492
493 return ipr_cmd;
494}
495
496/**
497 * ipr_unmap_sglist - Unmap scatterlist if mapped
498 * @ioa_cfg: ioa config struct
499 * @ipr_cmd: ipr command struct
500 *
501 * Return value:
502 * nothing
503 **/
504static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
505 struct ipr_cmnd *ipr_cmd)
506{
507 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
508
509 if (ipr_cmd->dma_use_sg) {
510 if (scsi_cmd->use_sg > 0) {
511 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
512 scsi_cmd->use_sg,
513 scsi_cmd->sc_data_direction);
514 } else {
515 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
516 scsi_cmd->request_bufflen,
517 scsi_cmd->sc_data_direction);
518 }
519 }
520}
521
522/**
523 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
524 * @ioa_cfg: ioa config struct
525 * @clr_ints: interrupts to clear
526 *
527 * This function masks all interrupts on the adapter, then clears the
528 * interrupts specified in the mask
529 *
530 * Return value:
531 * none
532 **/
533static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
534 u32 clr_ints)
535{
536 volatile u32 int_reg;
537
538 /* Stop new interrupts */
539 ioa_cfg->allow_interrupts = 0;
540
541 /* Set interrupt mask to stop all new interrupts */
542 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
543
544 /* Clear any pending interrupts */
545 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
546 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
547}
548
549/**
550 * ipr_save_pcix_cmd_reg - Save PCI-X command register
551 * @ioa_cfg: ioa config struct
552 *
553 * Return value:
554 * 0 on success / -EIO on failure
555 **/
556static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
557{
558 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
559
560 if (pcix_cmd_reg == 0) {
561 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
562 return -EIO;
563 }
564
565 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
566 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
567 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
568 return -EIO;
569 }
570
571 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
572 return 0;
573}
574
575/**
576 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
577 * @ioa_cfg: ioa config struct
578 *
579 * Return value:
580 * 0 on success / -EIO on failure
581 **/
582static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
583{
584 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
585
586 if (pcix_cmd_reg) {
587 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
588 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
589 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
590 return -EIO;
591 }
592 } else {
593 dev_err(&ioa_cfg->pdev->dev,
594 "Failed to setup PCI-X command register\n");
595 return -EIO;
596 }
597
598 return 0;
599}
600
601/**
602 * ipr_scsi_eh_done - mid-layer done function for aborted ops
603 * @ipr_cmd: ipr command struct
604 *
605 * This function is invoked by the interrupt handler for
606 * ops generated by the SCSI mid-layer which are being aborted.
607 *
608 * Return value:
609 * none
610 **/
611static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
612{
613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
614 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
615
616 scsi_cmd->result |= (DID_ERROR << 16);
617
618 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
619 scsi_cmd->scsi_done(scsi_cmd);
620 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
621}
622
623/**
624 * ipr_fail_all_ops - Fails all outstanding ops.
625 * @ioa_cfg: ioa config struct
626 *
627 * This function fails all outstanding ops.
628 *
629 * Return value:
630 * none
631 **/
632static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
633{
634 struct ipr_cmnd *ipr_cmd, *temp;
635
636 ENTER;
637 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
638 list_del(&ipr_cmd->queue);
639
640 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
641 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
642
643 if (ipr_cmd->scsi_cmd)
644 ipr_cmd->done = ipr_scsi_eh_done;
645
646 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
647 del_timer(&ipr_cmd->timer);
648 ipr_cmd->done(ipr_cmd);
649 }
650
651 LEAVE;
652}
653
654/**
655 * ipr_do_req - Send driver initiated requests.
656 * @ipr_cmd: ipr command struct
657 * @done: done function
658 * @timeout_func: timeout function
659 * @timeout: timeout value
660 *
661 * This function sends the specified command to the adapter with the
662 * timeout given. The done function is invoked on command completion.
663 *
664 * Return value:
665 * none
666 **/
667static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
668 void (*done) (struct ipr_cmnd *),
669 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
670{
671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
672
673 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
674
675 ipr_cmd->done = done;
676
677 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
678 ipr_cmd->timer.expires = jiffies + timeout;
679 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
680
681 add_timer(&ipr_cmd->timer);
682
683 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
684
685 mb();
686 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
687 ioa_cfg->regs.ioarrin_reg);
688}
689
690/**
691 * ipr_internal_cmd_done - Op done function for an internally generated op.
692 * @ipr_cmd: ipr command struct
693 *
694 * This function is the op done function for an internally generated,
695 * blocking op. It simply wakes the sleeping thread.
696 *
697 * Return value:
698 * none
699 **/
700static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
701{
702 if (ipr_cmd->sibling)
703 ipr_cmd->sibling = NULL;
704 else
705 complete(&ipr_cmd->completion);
706}
707
708/**
709 * ipr_send_blocking_cmd - Send command and sleep on its completion.
710 * @ipr_cmd: ipr command struct
711 * @timeout_func: function to invoke if command times out
712 * @timeout: timeout
713 *
714 * Return value:
715 * none
716 **/
717static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
718 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
719 u32 timeout)
720{
721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
722
723 init_completion(&ipr_cmd->completion);
724 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
725
726 spin_unlock_irq(ioa_cfg->host->host_lock);
727 wait_for_completion(&ipr_cmd->completion);
728 spin_lock_irq(ioa_cfg->host->host_lock);
729}
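/*
 * A minimal usage sketch (not taken from the original source) of how a
 * driver-initiated, blocking command is typically issued with the helpers
 * above; the command packet contents and timeout are illustrative only:
 *
 *	struct ipr_cmnd *ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = ...;
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, timeout_in_jiffies);
 *
 * The caller must hold the host lock; ipr_send_blocking_cmd() drops it while
 * sleeping on the completion and re-acquires it before returning.
 */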
730
731/**
732 * ipr_send_hcam - Send an HCAM to the adapter.
733 * @ioa_cfg: ioa config struct
734 * @type: HCAM type
735 * @hostrcb: hostrcb struct
736 *
737 * This function will send a Host Controlled Async command to the adapter.
738 * If HCAMs are currently not allowed to be issued to the adapter, it will
739 * place the hostrcb on the free queue.
740 *
741 * Return value:
742 * none
743 **/
744static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
745 struct ipr_hostrcb *hostrcb)
746{
747 struct ipr_cmnd *ipr_cmd;
748 struct ipr_ioarcb *ioarcb;
749
750 if (ioa_cfg->allow_cmds) {
751 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
752 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
753 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
754
755 ipr_cmd->u.hostrcb = hostrcb;
756 ioarcb = &ipr_cmd->ioarcb;
757
758 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
759 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
760 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
761 ioarcb->cmd_pkt.cdb[1] = type;
762 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
763 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
764
765 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
766 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
767 ipr_cmd->ioadl[0].flags_and_data_len =
768 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
769 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
770
771 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
772 ipr_cmd->done = ipr_process_ccn;
773 else
774 ipr_cmd->done = ipr_process_error;
775
776 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
777
778 mb();
779 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
780 ioa_cfg->regs.ioarrin_reg);
781 } else {
782 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
783 }
784}
785
786/**
787 * ipr_init_res_entry - Initialize a resource entry struct.
788 * @res: resource entry struct
789 *
790 * Return value:
791 * none
792 **/
793static void ipr_init_res_entry(struct ipr_resource_entry *res)
794{
795 res->needs_sync_complete = 1;
796 res->in_erp = 0;
797 res->add_to_ml = 0;
798 res->del_from_ml = 0;
799 res->resetting_device = 0;
800 res->sdev = NULL;
801}
802
803/**
804 * ipr_handle_config_change - Handle a config change from the adapter
805 * @ioa_cfg: ioa config struct
806 * @hostrcb: hostrcb
807 *
808 * Return value:
809 * none
810 **/
811static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
812 struct ipr_hostrcb *hostrcb)
813{
814 struct ipr_resource_entry *res = NULL;
815 struct ipr_config_table_entry *cfgte;
816 u32 is_ndn = 1;
817
818 cfgte = &hostrcb->hcam.u.ccn.cfgte;
819
820 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
821 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
822 sizeof(cfgte->res_addr))) {
823 is_ndn = 0;
824 break;
825 }
826 }
827
828 if (is_ndn) {
829 if (list_empty(&ioa_cfg->free_res_q)) {
830 ipr_send_hcam(ioa_cfg,
831 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
832 hostrcb);
833 return;
834 }
835
836 res = list_entry(ioa_cfg->free_res_q.next,
837 struct ipr_resource_entry, queue);
838
839 list_del(&res->queue);
840 ipr_init_res_entry(res);
841 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
842 }
843
844 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
845
846 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
847 if (res->sdev) {
848 res->sdev->hostdata = NULL;
849 res->del_from_ml = 1;
850 if (ioa_cfg->allow_ml_add_del)
851 schedule_work(&ioa_cfg->work_q);
852 } else
853 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
854 } else if (!res->sdev) {
855 res->add_to_ml = 1;
856 if (ioa_cfg->allow_ml_add_del)
857 schedule_work(&ioa_cfg->work_q);
858 }
859
860 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
861}
862
863/**
864 * ipr_process_ccn - Op done function for a CCN.
865 * @ipr_cmd: ipr command struct
866 *
867 * This function is the op done function for a configuration
868 * change notification host controlled async from the adapter.
869 *
870 * Return value:
871 * none
872 **/
873static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
874{
875 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
876 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
877 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
878
879 list_del(&hostrcb->queue);
880 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
881
882 if (ioasc) {
883 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
884 dev_err(&ioa_cfg->pdev->dev,
885 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
886
887 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
888 } else {
889 ipr_handle_config_change(ioa_cfg, hostrcb);
890 }
891}
892
893/**
894 * ipr_log_vpd - Log the passed VPD to the error log.
895 * @vpd: vendor/product id/sn struct
896 *
897 * Return value:
898 * none
899 **/
900static void ipr_log_vpd(struct ipr_vpd *vpd)
901{
902 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
903 + IPR_SERIAL_NUM_LEN];
904
905 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
906 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
907 IPR_PROD_ID_LEN);
908 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
909 ipr_err("Vendor/Product ID: %s\n", buffer);
910
911 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
912 buffer[IPR_SERIAL_NUM_LEN] = '\0';
913 ipr_err(" Serial Number: %s\n", buffer);
914}
915
916/**
917 * ipr_log_cache_error - Log a cache error.
918 * @ioa_cfg: ioa config struct
919 * @hostrcb: hostrcb struct
920 *
921 * Return value:
922 * none
923 **/
924static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
925 struct ipr_hostrcb *hostrcb)
926{
927 struct ipr_hostrcb_type_02_error *error =
928 &hostrcb->hcam.u.error.u.type_02_error;
929
930 ipr_err("-----Current Configuration-----\n");
931 ipr_err("Cache Directory Card Information:\n");
932 ipr_log_vpd(&error->ioa_vpd);
933 ipr_err("Adapter Card Information:\n");
934 ipr_log_vpd(&error->cfc_vpd);
935
936 ipr_err("-----Expected Configuration-----\n");
937 ipr_err("Cache Directory Card Information:\n");
938 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
939 ipr_err("Adapter Card Information:\n");
940 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
941
942 ipr_err("Additional IOA Data: %08X %08X %08X\n",
943 be32_to_cpu(error->ioa_data[0]),
944 be32_to_cpu(error->ioa_data[1]),
945 be32_to_cpu(error->ioa_data[2]));
946}
947
948/**
949 * ipr_log_config_error - Log a configuration error.
950 * @ioa_cfg: ioa config struct
951 * @hostrcb: hostrcb struct
952 *
953 * Return value:
954 * none
955 **/
956static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
957 struct ipr_hostrcb *hostrcb)
958{
959 int errors_logged, i;
960 struct ipr_hostrcb_device_data_entry *dev_entry;
961 struct ipr_hostrcb_type_03_error *error;
962
963 error = &hostrcb->hcam.u.error.u.type_03_error;
964 errors_logged = be32_to_cpu(error->errors_logged);
965
966 ipr_err("Device Errors Detected/Logged: %d/%d\n",
967 be32_to_cpu(error->errors_detected), errors_logged);
968
969 dev_entry = error->dev;
970
971 for (i = 0; i < errors_logged; i++, dev_entry++) {
972 ipr_err_separator;
973
974 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
975 ipr_log_vpd(&dev_entry->vpd);
976
977 ipr_err("-----New Device Information-----\n");
978 ipr_log_vpd(&dev_entry->new_vpd);
979
980 ipr_err("Cache Directory Card Information:\n");
981 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
982
983 ipr_err("Adapter Card Information:\n");
984 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
985
986 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
987 be32_to_cpu(dev_entry->ioa_data[0]),
988 be32_to_cpu(dev_entry->ioa_data[1]),
989 be32_to_cpu(dev_entry->ioa_data[2]),
990 be32_to_cpu(dev_entry->ioa_data[3]),
991 be32_to_cpu(dev_entry->ioa_data[4]));
992 }
993}
994
995/**
996 * ipr_log_array_error - Log an array configuration error.
997 * @ioa_cfg: ioa config struct
998 * @hostrcb: hostrcb struct
999 *
1000 * Return value:
1001 * none
1002 **/
1003static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1004 struct ipr_hostrcb *hostrcb)
1005{
1006 int i;
1007 struct ipr_hostrcb_type_04_error *error;
1008 struct ipr_hostrcb_array_data_entry *array_entry;
1009 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1010
1011 error = &hostrcb->hcam.u.error.u.type_04_error;
1012
1013 ipr_err_separator;
1014
1015 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1016 error->protection_level,
1017 ioa_cfg->host->host_no,
1018 error->last_func_vset_res_addr.bus,
1019 error->last_func_vset_res_addr.target,
1020 error->last_func_vset_res_addr.lun);
1021
1022 ipr_err_separator;
1023
1024 array_entry = error->array_member;
1025
1026 for (i = 0; i < 18; i++) {
1027 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1028 continue;
1029
1030 if (be32_to_cpu(error->exposed_mode_adn) == i)
1031 ipr_err("Exposed Array Member %d:\n", i);
1032 else
1033 ipr_err("Array Member %d:\n", i);
1034
1035 ipr_log_vpd(&array_entry->vpd);
1036
1037 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1038 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1039 "Expected Location");
1040
1041 ipr_err_separator;
1042
1043 if (i == 9)
1044 array_entry = error->array_member2;
1045 else
1046 array_entry++;
1047 }
1048}
1049
1050/**
1051 * ipr_log_generic_error - Log an adapter error.
1052 * @ioa_cfg: ioa config struct
1053 * @hostrcb: hostrcb struct
1054 *
1055 * Return value:
1056 * none
1057 **/
1058static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1059 struct ipr_hostrcb *hostrcb)
1060{
1061 int i;
1062 int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);
1063
1064 if (ioa_data_len == 0)
1065 return;
1066
1067 for (i = 0; i < ioa_data_len / 4; i += 4) {
1068 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1069 be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
1070 be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
1071 be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
1072 be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
1073 }
1074}
1075
1076/**
1077 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1078 * @ioasc: IOASC
1079 *
1080 * This function will return the index into the ipr_error_table
1081 * for the specified IOASC. If the IOASC is not in the table,
1082 * 0 will be returned, which points to the entry used for unknown errors.
1083 *
1084 * Return value:
1085 * index into the ipr_error_table
1086 **/
1087static u32 ipr_get_error(u32 ioasc)
1088{
1089 int i;
1090
1091 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1092 if (ipr_error_table[i].ioasc == ioasc)
1093 return i;
1094
1095 return 0;
1096}
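/*
 * For example, ipr_get_error(0x01080000) returns the index of the
 * "FFFE: Soft device bus error recovered by the IOA" entry, while an IOASC
 * that is not listed falls through to index 0, the unknown-error entry.
 */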
1097
1098/**
1099 * ipr_handle_log_data - Log an adapter error.
1100 * @ioa_cfg: ioa config struct
1101 * @hostrcb: hostrcb struct
1102 *
1103 * This function logs an adapter error to the system.
1104 *
1105 * Return value:
1106 * none
1107 **/
1108static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1109 struct ipr_hostrcb *hostrcb)
1110{
1111 u32 ioasc;
1112 int error_index;
1113
1114 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1115 return;
1116
1117 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1118 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1119
1120 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1121
1122 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1123 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1124 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1125 scsi_report_bus_reset(ioa_cfg->host,
1126 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1127 }
1128
1129 error_index = ipr_get_error(ioasc);
1130
1131 if (!ipr_error_table[error_index].log_hcam)
1132 return;
1133
1134 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1135 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1136 "%s\n", ipr_error_table[error_index].error);
1137 } else {
1138 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1139 ipr_error_table[error_index].error);
1140 }
1141
1142 /* Set indication we have logged an error */
1143 ioa_cfg->errors_logged++;
1144
1145 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1146 return;
1147 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1148 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1149
1150 switch (hostrcb->hcam.overlay_id) {
1151 case IPR_HOST_RCB_OVERLAY_ID_2:
1152 ipr_log_cache_error(ioa_cfg, hostrcb);
1153 break;
1154 case IPR_HOST_RCB_OVERLAY_ID_3:
1155 ipr_log_config_error(ioa_cfg, hostrcb);
1156 break;
1157 case IPR_HOST_RCB_OVERLAY_ID_4:
1158 case IPR_HOST_RCB_OVERLAY_ID_6:
1159 ipr_log_array_error(ioa_cfg, hostrcb);
1160 break;
1161 case IPR_HOST_RCB_OVERLAY_ID_1:
1162 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1163 default:
1164 ipr_log_generic_error(ioa_cfg, hostrcb);
1165 break;
1166 }
1167}
1168
1169/**
1170 * ipr_process_error - Op done function for an adapter error log.
1171 * @ipr_cmd: ipr command struct
1172 *
1173 * This function is the op done function for an error log host
1174 * controlled async from the adapter. It will log the error and
1175 * send the HCAM back to the adapter.
1176 *
1177 * Return value:
1178 * none
1179 **/
1180static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1181{
1182 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1183 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1184 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1185
1186 list_del(&hostrcb->queue);
1187 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1188
1189 if (!ioasc) {
1190 ipr_handle_log_data(ioa_cfg, hostrcb);
1191 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1192 dev_err(&ioa_cfg->pdev->dev,
1193 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1194 }
1195
1196 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1197}
1198
1199/**
1200 * ipr_timeout - An internally generated op has timed out.
1201 * @ipr_cmd: ipr command struct
1202 *
1203 * This function blocks host requests and initiates an
1204 * adapter reset.
1205 *
1206 * Return value:
1207 * none
1208 **/
1209static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1210{
1211 unsigned long lock_flags = 0;
1212 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1213
1214 ENTER;
1215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1216
1217 ioa_cfg->errors_logged++;
1218 dev_err(&ioa_cfg->pdev->dev,
1219 "Adapter being reset due to command timeout.\n");
1220
1221 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1222 ioa_cfg->sdt_state = GET_DUMP;
1223
1224 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1225 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1226
1227 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1228 LEAVE;
1229}
1230
1231/**
1232 * ipr_oper_timeout - Adapter timed out transitioning to operational
1233 * @ipr_cmd: ipr command struct
1234 *
1235 * This function blocks host requests and initiates an
1236 * adapter reset.
1237 *
1238 * Return value:
1239 * none
1240 **/
1241static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1242{
1243 unsigned long lock_flags = 0;
1244 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1245
1246 ENTER;
1247 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1248
1249 ioa_cfg->errors_logged++;
1250 dev_err(&ioa_cfg->pdev->dev,
1251 "Adapter timed out transitioning to operational.\n");
1252
1253 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1254 ioa_cfg->sdt_state = GET_DUMP;
1255
1256 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1257 if (ipr_fastfail)
1258 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1259 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1260 }
1261
1262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1263 LEAVE;
1264}
1265
1266/**
1267 * ipr_reset_reload - Reset/Reload the IOA
1268 * @ioa_cfg: ioa config struct
1269 * @shutdown_type: shutdown type
1270 *
1271 * This function resets the adapter and re-initializes it.
1272 * This function assumes that all new host commands have been stopped.
1273 * Return value:
1274 * SUCCESS / FAILED
1275 **/
1276static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1277 enum ipr_shutdown_type shutdown_type)
1278{
1279 if (!ioa_cfg->in_reset_reload)
1280 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1281
1282 spin_unlock_irq(ioa_cfg->host->host_lock);
1283 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1284 spin_lock_irq(ioa_cfg->host->host_lock);
1285
1286 /* If we got hit with a host reset while we were already resetting
1287 the adapter for some reason, and that reset failed, report failure. */
1288 if (ioa_cfg->ioa_is_dead) {
1289 ipr_trace;
1290 return FAILED;
1291 }
1292
1293 return SUCCESS;
1294}
1295
1296/**
1297 * ipr_find_ses_entry - Find matching SES in SES table
1298 * @res: resource entry struct of SES
1299 *
1300 * Return value:
1301 * pointer to SES table entry / NULL on failure
1302 **/
1303static const struct ipr_ses_table_entry *
1304ipr_find_ses_entry(struct ipr_resource_entry *res)
1305{
1306 int i, j, matches;
1307 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1308
1309 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1310 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1311 if (ste->compare_product_id_byte[j] == 'X') {
1312 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1313 matches++;
1314 else
1315 break;
1316 } else
1317 matches++;
1318 }
1319
1320 if (matches == IPR_PROD_ID_LEN)
1321 return ste;
1322 }
1323
1324 return NULL;
1325}
1326
1327/**
1328 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1329 * @ioa_cfg: ioa config struct
1330 * @bus: SCSI bus
1331 * @bus_width: bus width
1332 *
1333 * Return value:
1334 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1335 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1336 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1337 * max 160MHz = max 320MB/sec).
1338 **/
1339static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1340{
1341 struct ipr_resource_entry *res;
1342 const struct ipr_ses_table_entry *ste;
1343 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1344
1345 /* Loop through each config table entry in the config table buffer */
1346 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1347 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1348 continue;
1349
1350 if (bus != res->cfgte.res_addr.bus)
1351 continue;
1352
1353 if (!(ste = ipr_find_ses_entry(res)))
1354 continue;
1355
1356 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1357 }
1358
1359 return max_xfer_rate;
1360}
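/*
 * Worked example: an SES entry with a max_bus_speed_limit of 160 on a
 * 16-bit wide bus yields (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the
 * 100KHz units described above.
 */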
1361
1362/**
1363 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1364 * @ioa_cfg: ioa config struct
1365 * @max_delay: max delay in micro-seconds to wait
1366 *
1367 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1368 *
1369 * Return value:
1370 * 0 on success / other on failure
1371 **/
1372static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1373{
1374 volatile u32 pcii_reg;
1375 int delay = 1;
1376
1377 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1378 while (delay < max_delay) {
1379 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1380
1381 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1382 return 0;
1383
1384 /* udelay cannot be used if delay is more than a few milliseconds */
1385 if ((delay / 1000) > MAX_UDELAY_MS)
1386 mdelay(delay / 1000);
1387 else
1388 udelay(delay);
1389
1390 delay += delay;
1391 }
1392 return -EIO;
1393}
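/*
 * The delay above doubles on each iteration (1, 2, 4, ... microseconds) and
 * switches from udelay() to mdelay() once a single delay exceeds
 * MAX_UDELAY_MS milliseconds, so the total busy-wait is bounded by roughly
 * twice max_delay.
 */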
1394
1395/**
1396 * ipr_get_ldump_data_section - Dump IOA memory
1397 * @ioa_cfg: ioa config struct
1398 * @start_addr: adapter address to dump
1399 * @dest: destination kernel buffer
1400 * @length_in_words: length to dump in 4 byte words
1401 *
1402 * Return value:
1403 * 0 on success / -EIO on failure
1404 **/
1405static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1406 u32 start_addr,
1407 __be32 *dest, u32 length_in_words)
1408{
1409 volatile u32 temp_pcii_reg;
1410 int i, delay = 0;
1411
1412 /* Write IOA interrupt reg starting LDUMP state */
1413 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1414 ioa_cfg->regs.set_uproc_interrupt_reg);
1415
1416 /* Wait for IO debug acknowledge */
1417 if (ipr_wait_iodbg_ack(ioa_cfg,
1418 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1419 dev_err(&ioa_cfg->pdev->dev,
1420 "IOA dump long data transfer timeout\n");
1421 return -EIO;
1422 }
1423
1424 /* Signal LDUMP interlocked - clear IO debug ack */
1425 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1426 ioa_cfg->regs.clr_interrupt_reg);
1427
1428 /* Write Mailbox with starting address */
1429 writel(start_addr, ioa_cfg->ioa_mailbox);
1430
1431 /* Signal address valid - clear IOA Reset alert */
1432 writel(IPR_UPROCI_RESET_ALERT,
1433 ioa_cfg->regs.clr_uproc_interrupt_reg);
1434
1435 for (i = 0; i < length_in_words; i++) {
1436 /* Wait for IO debug acknowledge */
1437 if (ipr_wait_iodbg_ack(ioa_cfg,
1438 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1439 dev_err(&ioa_cfg->pdev->dev,
1440 "IOA dump short data transfer timeout\n");
1441 return -EIO;
1442 }
1443
1444 /* Read data from mailbox and increment destination pointer */
1445 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1446 dest++;
1447
1448 /* For all but the last word of data, signal data received */
1449 if (i < (length_in_words - 1)) {
1450 /* Signal dump data received - Clear IO debug Ack */
1451 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1452 ioa_cfg->regs.clr_interrupt_reg);
1453 }
1454 }
1455
1456 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1457 writel(IPR_UPROCI_RESET_ALERT,
1458 ioa_cfg->regs.set_uproc_interrupt_reg);
1459
1460 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1461 ioa_cfg->regs.clr_uproc_interrupt_reg);
1462
1463 /* Signal dump data received - Clear IO debug Ack */
1464 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1465 ioa_cfg->regs.clr_interrupt_reg);
1466
1467 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1468 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1469 temp_pcii_reg =
1470 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1471
1472 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1473 return 0;
1474
1475 udelay(10);
1476 delay += 10;
1477 }
1478
1479 return 0;
1480}
1481
1482#ifdef CONFIG_SCSI_IPR_DUMP
1483/**
1484 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1485 * @ioa_cfg: ioa config struct
1486 * @pci_address: adapter address
1487 * @length: length of data to copy
1488 *
1489 * Copy data from PCI adapter to kernel buffer.
1490 * Note: length MUST be a 4 byte multiple
1491 * Return value:
1492 * 0 on success / other on failure
1493 **/
1494static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1495 unsigned long pci_address, u32 length)
1496{
1497 int bytes_copied = 0;
1498 int cur_len, rc, rem_len, rem_page_len;
1499 __be32 *page;
1500 unsigned long lock_flags = 0;
1501 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1502
1503 while (bytes_copied < length &&
1504 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1505 if (ioa_dump->page_offset >= PAGE_SIZE ||
1506 ioa_dump->page_offset == 0) {
1507 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1508
1509 if (!page) {
1510 ipr_trace;
1511 return bytes_copied;
1512 }
1513
1514 ioa_dump->page_offset = 0;
1515 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1516 ioa_dump->next_page_index++;
1517 } else
1518 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1519
1520 rem_len = length - bytes_copied;
1521 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1522 cur_len = min(rem_len, rem_page_len);
1523
1524 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1525 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1526 rc = -EIO;
1527 } else {
1528 rc = ipr_get_ldump_data_section(ioa_cfg,
1529 pci_address + bytes_copied,
1530 &page[ioa_dump->page_offset / 4],
1531 (cur_len / sizeof(u32)));
1532 }
1533 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1534
1535 if (!rc) {
1536 ioa_dump->page_offset += cur_len;
1537 bytes_copied += cur_len;
1538 } else {
1539 ipr_trace;
1540 break;
1541 }
1542 schedule();
1543 }
1544
1545 return bytes_copied;
1546}
1547
1548/**
1549 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1550 * @hdr: dump entry header struct
1551 *
1552 * Return value:
1553 * nothing
1554 **/
1555static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1556{
1557 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1558 hdr->num_elems = 1;
1559 hdr->offset = sizeof(*hdr);
1560 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1561}
1562
1563/**
1564 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1565 * @ioa_cfg: ioa config struct
1566 * @driver_dump: driver dump struct
1567 *
1568 * Return value:
1569 * nothing
1570 **/
1571static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1572 struct ipr_driver_dump *driver_dump)
1573{
1574 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1575
1576 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1577 driver_dump->ioa_type_entry.hdr.len =
1578 sizeof(struct ipr_dump_ioa_type_entry) -
1579 sizeof(struct ipr_dump_entry_header);
1580 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1581 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1582 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1583 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1584 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1585 ucode_vpd->minor_release[1];
1586 driver_dump->hdr.num_entries++;
1587}
1588
1589/**
1590 * ipr_dump_version_data - Fill in the driver version in the dump.
1591 * @ioa_cfg: ioa config struct
1592 * @driver_dump: driver dump struct
1593 *
1594 * Return value:
1595 * nothing
1596 **/
1597static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1598 struct ipr_driver_dump *driver_dump)
1599{
1600 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1601 driver_dump->version_entry.hdr.len =
1602 sizeof(struct ipr_dump_version_entry) -
1603 sizeof(struct ipr_dump_entry_header);
1604 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1605 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1606 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1607 driver_dump->hdr.num_entries++;
1608}
1609
1610/**
1611 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1612 * @ioa_cfg: ioa config struct
1613 * @driver_dump: driver dump struct
1614 *
1615 * Return value:
1616 * nothing
1617 **/
1618static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1619 struct ipr_driver_dump *driver_dump)
1620{
1621 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1622 driver_dump->trace_entry.hdr.len =
1623 sizeof(struct ipr_dump_trace_entry) -
1624 sizeof(struct ipr_dump_entry_header);
1625 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1626 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1627 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1628 driver_dump->hdr.num_entries++;
1629}
1630
1631/**
1632 * ipr_dump_location_data - Fill in the IOA location in the dump.
1633 * @ioa_cfg: ioa config struct
1634 * @driver_dump: driver dump struct
1635 *
1636 * Return value:
1637 * nothing
1638 **/
1639static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1640 struct ipr_driver_dump *driver_dump)
1641{
1642 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1643 driver_dump->location_entry.hdr.len =
1644 sizeof(struct ipr_dump_location_entry) -
1645 sizeof(struct ipr_dump_entry_header);
1646 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1647 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1648 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1649 driver_dump->hdr.num_entries++;
1650}
1651
1652/**
1653 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1654 * @ioa_cfg: ioa config struct
1655 * @dump: dump struct
1656 *
1657 * Return value:
1658 * nothing
1659 **/
1660static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1661{
1662 unsigned long start_addr, sdt_word;
1663 unsigned long lock_flags = 0;
1664 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1665 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1666 u32 num_entries, start_off, end_off;
1667 u32 bytes_to_copy, bytes_copied, rc;
1668 struct ipr_sdt *sdt;
1669 int i;
1670
1671 ENTER;
1672
1673 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1674
1675 if (ioa_cfg->sdt_state != GET_DUMP) {
1676 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1677 return;
1678 }
1679
1680 start_addr = readl(ioa_cfg->ioa_mailbox);
1681
1682 if (!ipr_sdt_is_fmt2(start_addr)) {
1683 dev_err(&ioa_cfg->pdev->dev,
1684 "Invalid dump table format: %lx\n", start_addr);
1685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1686 return;
1687 }
1688
1689 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1690
1691 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1692
1693 /* Initialize the overall dump header */
1694 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1695 driver_dump->hdr.num_entries = 1;
1696 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1697 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1698 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1699 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1700
1701 ipr_dump_version_data(ioa_cfg, driver_dump);
1702 ipr_dump_location_data(ioa_cfg, driver_dump);
1703 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1704 ipr_dump_trace_data(ioa_cfg, driver_dump);
1705
1706 /* Update dump_header */
1707 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1708
1709 /* IOA Dump entry */
1710 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1711 ioa_dump->format = IPR_SDT_FMT2;
1712 ioa_dump->hdr.len = 0;
1713 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1714 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1715
1716 /* First entries in sdt are actually a list of dump addresses and
1717 lengths to gather the real dump data. sdt represents the pointer
1718 to the ioa generated dump table. Dump data will be extracted based
1719 on entries in this table */
1720 sdt = &ioa_dump->sdt;
1721
1722 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1723 sizeof(struct ipr_sdt) / sizeof(__be32));
1724
1725 /* Smart Dump table is ready to use and the first entry is valid */
1726 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1727 dev_err(&ioa_cfg->pdev->dev,
1728 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1729 rc, be32_to_cpu(sdt->hdr.state));
1730 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1731 ioa_cfg->sdt_state = DUMP_OBTAINED;
1732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1733 return;
1734 }
1735
1736 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1737
1738 if (num_entries > IPR_NUM_SDT_ENTRIES)
1739 num_entries = IPR_NUM_SDT_ENTRIES;
1740
1741 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1742
1743 for (i = 0; i < num_entries; i++) {
1744 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1745 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1746 break;
1747 }
1748
1749 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1750 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1751 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1752 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1753
1754 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1755 bytes_to_copy = end_off - start_off;
1756 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1757 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1758 continue;
1759 }
1760
1761 /* Copy data from adapter to driver buffers */
1762 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1763 bytes_to_copy);
1764
1765 ioa_dump->hdr.len += bytes_copied;
1766
1767 if (bytes_copied != bytes_to_copy) {
1768 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1769 break;
1770 }
1771 }
1772 }
1773 }
1774
1775 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1776
1777 /* Update dump_header */
1778 driver_dump->hdr.len += ioa_dump->hdr.len;
1779 wmb();
1780 ioa_cfg->sdt_state = DUMP_OBTAINED;
1781 LEAVE;
1782}
1783
1784#else
1785#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1786#endif
1787
1788/**
1789 * ipr_release_dump - Free adapter dump memory
1790 * @kref: kref struct
1791 *
1792 * Return value:
1793 * nothing
1794 **/
1795static void ipr_release_dump(struct kref *kref)
1796{
1797 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1798 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1799 unsigned long lock_flags = 0;
1800 int i;
1801
1802 ENTER;
1803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1804 ioa_cfg->dump = NULL;
1805 ioa_cfg->sdt_state = INACTIVE;
1806 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1807
1808 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1809 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1810
1811 kfree(dump);
1812 LEAVE;
1813}
1814
1815/**
1816 * ipr_worker_thread - Worker thread
1817 * @data: ioa config struct
1818 *
1819 * Called at task level from a work thread. This function takes care
 1821 * of adding and removing devices from the mid-layer as configuration
1821 * changes are detected by the adapter.
1822 *
1823 * Return value:
1824 * nothing
1825 **/
1826static void ipr_worker_thread(void *data)
1827{
1828 unsigned long lock_flags;
1829 struct ipr_resource_entry *res;
1830 struct scsi_device *sdev;
1831 struct ipr_dump *dump;
1832 struct ipr_ioa_cfg *ioa_cfg = data;
1833 u8 bus, target, lun;
1834 int did_work;
1835
1836 ENTER;
1837 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1838
1839 if (ioa_cfg->sdt_state == GET_DUMP) {
1840 dump = ioa_cfg->dump;
1841 if (!dump) {
1842 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1843 return;
1844 }
1845 kref_get(&dump->kref);
1846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1847 ipr_get_ioa_dump(ioa_cfg, dump);
1848 kref_put(&dump->kref, ipr_release_dump);
1849
1850 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1851 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1852 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1854 return;
1855 }
1856
1857restart:
1858 do {
1859 did_work = 0;
1860 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1861 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1862 return;
1863 }
1864
1865 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1866 if (res->del_from_ml && res->sdev) {
1867 did_work = 1;
1868 sdev = res->sdev;
1869 if (!scsi_device_get(sdev)) {
1870 res->sdev = NULL;
1871 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1872 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1873 scsi_remove_device(sdev);
1874 scsi_device_put(sdev);
1875 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1876 }
1877 break;
1878 }
1879 }
1880 } while(did_work);
1881
1882 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1883 if (res->add_to_ml) {
1884 bus = res->cfgte.res_addr.bus;
1885 target = res->cfgte.res_addr.target;
1886 lun = res->cfgte.res_addr.lun;
1887 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1888 scsi_add_device(ioa_cfg->host, bus, target, lun);
1889 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1890 goto restart;
1891 }
1892 }
1893
1894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1895 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1896 LEAVE;
1897}
1898
1899#ifdef CONFIG_SCSI_IPR_TRACE
1900/**
1901 * ipr_read_trace - Dump the adapter trace
1902 * @kobj: kobject struct
1903 * @buf: buffer
1904 * @off: offset
1905 * @count: buffer size
1906 *
1907 * Return value:
1908 * number of bytes printed to buffer
1909 **/
1910static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1911 loff_t off, size_t count)
1912{
1913 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1914 struct Scsi_Host *shost = class_to_shost(cdev);
1915 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1916 unsigned long lock_flags = 0;
1917 int size = IPR_TRACE_SIZE;
1918 char *src = (char *)ioa_cfg->trace;
1919
1920 if (off > size)
1921 return 0;
1922 if (off + count > size) {
1923 size -= off;
1924 count = size;
1925 }
1926
1927 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1928 memcpy(buf, &src[off], count);
1929 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1930 return count;
1931}
1932
1933static struct bin_attribute ipr_trace_attr = {
1934 .attr = {
1935 .name = "trace",
1936 .mode = S_IRUGO,
1937 },
1938 .size = 0,
1939 .read = ipr_read_trace,
1940};
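/*
 * Illustrative usage of the "trace" attribute (sysfs path and host number
 * are examples, not guaranteed): the binary trace buffer can be captured
 * with e.g. `dd if=/sys/class/scsi_host/host0/trace of=ipr_trace.bin`.
 */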
1941#endif
1942
1943static const struct {
1944 enum ipr_cache_state state;
1945 char *name;
1946} cache_state [] = {
1947 { CACHE_NONE, "none" },
1948 { CACHE_DISABLED, "disabled" },
1949 { CACHE_ENABLED, "enabled" }
1950};
1951
1952/**
1953 * ipr_show_write_caching - Show the write caching attribute
1954 * @class_dev: class device struct
1955 * @buf: buffer
1956 *
1957 * Return value:
1958 * number of bytes printed to buffer
1959 **/
1960static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
1961{
1962 struct Scsi_Host *shost = class_to_shost(class_dev);
1963 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1964 unsigned long lock_flags = 0;
1965 int i, len = 0;
1966
1967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1968 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
1969 if (cache_state[i].state == ioa_cfg->cache_state) {
1970 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
1971 break;
1972 }
1973 }
1974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1975 return len;
1976}
1977
1978
1979/**
1980 * ipr_store_write_caching - Enable/disable adapter write cache
1981 * @class_dev: class_device struct
1982 * @buf: buffer
1983 * @count: buffer size
1984 *
1985 * This function will enable/disable adapter write cache.
1986 *
1987 * Return value:
1988 * count on success / other on failure
1989 **/
1990static ssize_t ipr_store_write_caching(struct class_device *class_dev,
1991 const char *buf, size_t count)
1992{
1993 struct Scsi_Host *shost = class_to_shost(class_dev);
1994 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1995 unsigned long lock_flags = 0;
1996 enum ipr_cache_state new_state = CACHE_INVALID;
1997 int i;
1998
1999 if (!capable(CAP_SYS_ADMIN))
2000 return -EACCES;
2001 if (ioa_cfg->cache_state == CACHE_NONE)
2002 return -EINVAL;
2003
2004 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2005 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2006 new_state = cache_state[i].state;
2007 break;
2008 }
2009 }
2010
2011 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2012 return -EINVAL;
2013
2014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2015 if (ioa_cfg->cache_state == new_state) {
2016 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2017 return count;
2018 }
2019
2020 ioa_cfg->cache_state = new_state;
2021 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2022 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2023 if (!ioa_cfg->in_reset_reload)
2024 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2026 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2027
2028 return count;
2029}
2030
2031static struct class_device_attribute ipr_ioa_cache_attr = {
2032 .attr = {
2033 .name = "write_cache",
2034 .mode = S_IRUGO | S_IWUSR,
2035 },
2036 .show = ipr_show_write_caching,
2037 .store = ipr_store_write_caching
2038};
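/*
 * Illustrative usage of the "write_cache" attribute (sysfs path and host
 * number are examples, not guaranteed):
 *   cat /sys/class/scsi_host/host0/write_cache      -> e.g. "enabled"
 *   echo disabled > /sys/class/scsi_host/host0/write_cache
 * Writing a new state initiates a normal adapter shutdown/reset so the
 * change takes effect; the write blocks until the reset/reload finishes.
 */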
2039
2040/**
2041 * ipr_show_fw_version - Show the firmware version
2042 * @class_dev: class device struct
2043 * @buf: buffer
2044 *
2045 * Return value:
2046 * number of bytes printed to buffer
2047 **/
2048static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2049{
2050 struct Scsi_Host *shost = class_to_shost(class_dev);
2051 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2052 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2053 unsigned long lock_flags = 0;
2054 int len;
2055
2056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2057 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2058 ucode_vpd->major_release, ucode_vpd->card_type,
2059 ucode_vpd->minor_release[0],
2060 ucode_vpd->minor_release[1]);
2061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2062 return len;
2063}
2064
2065static struct class_device_attribute ipr_fw_version_attr = {
2066 .attr = {
2067 .name = "fw_version",
2068 .mode = S_IRUGO,
2069 },
2070 .show = ipr_show_fw_version,
2071};
2072
2073/**
2074 * ipr_show_log_level - Show the adapter's error logging level
2075 * @class_dev: class device struct
2076 * @buf: buffer
2077 *
2078 * Return value:
2079 * number of bytes printed to buffer
2080 **/
2081static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2082{
2083 struct Scsi_Host *shost = class_to_shost(class_dev);
2084 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2085 unsigned long lock_flags = 0;
2086 int len;
2087
2088 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2089 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2090 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2091 return len;
2092}
2093
2094/**
2095 * ipr_store_log_level - Change the adapter's error logging level
2096 * @class_dev: class device struct
2097 * @buf: buffer
2098 *
2099 * Return value:
 2100 * 	number of bytes consumed from buffer
2101 **/
2102static ssize_t ipr_store_log_level(struct class_device *class_dev,
2103 const char *buf, size_t count)
2104{
2105 struct Scsi_Host *shost = class_to_shost(class_dev);
2106 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2107 unsigned long lock_flags = 0;
2108
2109 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2110 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2111 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2112 return strlen(buf);
2113}
2114
2115static struct class_device_attribute ipr_log_level_attr = {
2116 .attr = {
2117 .name = "log_level",
2118 .mode = S_IRUGO | S_IWUSR,
2119 },
2120 .show = ipr_show_log_level,
2121 .store = ipr_store_log_level
2122};
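/*
 * Illustrative usage of the "log_level" attribute (path is an example):
 *   echo 4 > /sys/class/scsi_host/host0/log_level
 * The value is parsed with simple_strtoul() base 10, so only leading
 * decimal digits of the written string are honored.
 */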
2123
2124/**
2125 * ipr_store_diagnostics - IOA Diagnostics interface
2126 * @class_dev: class_device struct
2127 * @buf: buffer
2128 * @count: buffer size
2129 *
2130 * This function will reset the adapter and wait a reasonable
2131 * amount of time for any errors that the adapter might log.
2132 *
2133 * Return value:
2134 * count on success / other on failure
2135 **/
2136static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2137 const char *buf, size_t count)
2138{
2139 struct Scsi_Host *shost = class_to_shost(class_dev);
2140 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2141 unsigned long lock_flags = 0;
2142 int rc = count;
2143
2144 if (!capable(CAP_SYS_ADMIN))
2145 return -EACCES;
2146
2147 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2148 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2149 ioa_cfg->errors_logged = 0;
2150 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2151
2152 if (ioa_cfg->in_reset_reload) {
2153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2154 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2155
2156 /* Wait for a second for any errors to be logged */
2157 msleep(1000);
2158 } else {
2159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2160 return -EIO;
2161 }
2162
2163 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2164 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2165 rc = -EIO;
2166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2167
2168 return rc;
2169}
2170
2171static struct class_device_attribute ipr_diagnostics_attr = {
2172 .attr = {
2173 .name = "run_diagnostics",
2174 .mode = S_IWUSR,
2175 },
2176 .store = ipr_store_diagnostics
2177};
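/*
 * Illustrative usage of the write-only "run_diagnostics" attribute (path is
 * an example): `echo 1 > /sys/class/scsi_host/host0/run_diagnostics`.
 * The written value is not inspected; any write triggers a normal adapter
 * reset, and the store returns -EIO if the reset did not start or if the
 * adapter logged errors afterwards.
 */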
2178
2179/**
2180 * ipr_store_reset_adapter - Reset the adapter
2181 * @class_dev: class_device struct
2182 * @buf: buffer
2183 * @count: buffer size
2184 *
2185 * This function will reset the adapter.
2186 *
2187 * Return value:
2188 * count on success / other on failure
2189 **/
2190static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2191 const char *buf, size_t count)
2192{
2193 struct Scsi_Host *shost = class_to_shost(class_dev);
2194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2195 unsigned long lock_flags;
2196 int result = count;
2197
2198 if (!capable(CAP_SYS_ADMIN))
2199 return -EACCES;
2200
2201 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2202 if (!ioa_cfg->in_reset_reload)
2203 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2204 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2205 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2206
2207 return result;
2208}
2209
2210static struct class_device_attribute ipr_ioa_reset_attr = {
2211 .attr = {
2212 .name = "reset_host",
2213 .mode = S_IWUSR,
2214 },
2215 .store = ipr_store_reset_adapter
2216};
2217
2218/**
2219 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2220 * @buf_len: buffer length
2221 *
2222 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2223 * list to use for microcode download
2224 *
2225 * Return value:
2226 * pointer to sglist / NULL on failure
2227 **/
2228static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2229{
2230 int sg_size, order, bsize_elem, num_elem, i, j;
2231 struct ipr_sglist *sglist;
2232 struct scatterlist *scatterlist;
2233 struct page *page;
2234
2235 /* Get the minimum size per scatter/gather element */
2236 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2237
2238 /* Get the actual size per element */
2239 order = get_order(sg_size);
2240
2241 /* Determine the actual number of bytes per element */
2242 bsize_elem = PAGE_SIZE * (1 << order);
2243
2244 /* Determine the actual number of sg entries needed */
2245 if (buf_len % bsize_elem)
2246 num_elem = (buf_len / bsize_elem) + 1;
2247 else
2248 num_elem = buf_len / bsize_elem;
2249
2250 /* Allocate a scatter/gather list for the DMA */
0bc42e35 2251 	sglist = kzalloc(sizeof(struct ipr_sglist) +
 2252 			 (sizeof(struct scatterlist) * (num_elem - 1)),
 2253 			 GFP_KERNEL);
2254
2255 if (sglist == NULL) {
2256 ipr_trace;
2257 return NULL;
2258 }
2259
2260 scatterlist = sglist->scatterlist;
2261
2262 sglist->order = order;
2263 sglist->num_sg = num_elem;
2264
2265 /* Allocate a bunch of sg elements */
2266 for (i = 0; i < num_elem; i++) {
2267 page = alloc_pages(GFP_KERNEL, order);
2268 if (!page) {
2269 ipr_trace;
2270
2271 /* Free up what we already allocated */
2272 for (j = i - 1; j >= 0; j--)
2273 __free_pages(scatterlist[j].page, order);
2274 kfree(sglist);
2275 return NULL;
2276 }
2277
2278 scatterlist[i].page = page;
2279 }
2280
2281 return sglist;
2282}
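/*
 * Worked example of the sizing math above (numbers are illustrative and
 * assume 4K pages with IPR_MAX_SGLIST == 64): for a 600 KiB microcode
 * image, sg_size = 614400 / 63 = 9752, get_order(9752) = 2, so each
 * element holds bsize_elem = 16384 bytes and num_elem = 38 entries are
 * allocated (37 full elements plus one partial).
 */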
2283
2284/**
2285 * ipr_free_ucode_buffer - Frees a microcode download buffer
 2286 * @sglist: scatter/gather list pointer
2287 *
2288 * Free a DMA'able ucode download buffer previously allocated with
2289 * ipr_alloc_ucode_buffer
2290 *
2291 * Return value:
2292 * nothing
2293 **/
2294static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2295{
2296 int i;
2297
2298 for (i = 0; i < sglist->num_sg; i++)
2299 __free_pages(sglist->scatterlist[i].page, sglist->order);
2300
2301 kfree(sglist);
2302}
2303
2304/**
2305 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2306 * @sglist: scatter/gather list pointer
2307 * @buffer: buffer pointer
2308 * @len: buffer length
2309 *
2310 * Copy a microcode image from a user buffer into a buffer allocated by
2311 * ipr_alloc_ucode_buffer
2312 *
2313 * Return value:
2314 * 0 on success / other on failure
2315 **/
2316static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2317 u8 *buffer, u32 len)
2318{
2319 int bsize_elem, i, result = 0;
2320 struct scatterlist *scatterlist;
2321 void *kaddr;
2322
2323 /* Determine the actual number of bytes per element */
2324 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2325
2326 scatterlist = sglist->scatterlist;
2327
2328 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2329 kaddr = kmap(scatterlist[i].page);
2330 memcpy(kaddr, buffer, bsize_elem);
2331 kunmap(scatterlist[i].page);
2332
2333 scatterlist[i].length = bsize_elem;
2334
2335 if (result != 0) {
2336 ipr_trace;
2337 return result;
2338 }
2339 }
2340
2341 if (len % bsize_elem) {
2342 kaddr = kmap(scatterlist[i].page);
2343 memcpy(kaddr, buffer, len % bsize_elem);
2344 kunmap(scatterlist[i].page);
2345
2346 scatterlist[i].length = len % bsize_elem;
2347 }
2348
2349 sglist->buffer_len = len;
2350 return result;
2351}
2352
2353/**
2354 * ipr_map_ucode_buffer - Map a microcode download buffer
2355 * @ipr_cmd: ipr command struct
2356 * @sglist: scatter/gather list
2357 * @len: total length of download buffer
2358 *
2359 * Maps a microcode download scatter/gather list for DMA and
2360 * builds the IOADL.
2361 *
2362 * Return value:
2363 * 0 on success / -EIO on failure
2364 **/
2365static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2366 struct ipr_sglist *sglist, int len)
2367{
2368 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2369 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2370 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2371 struct scatterlist *scatterlist = sglist->scatterlist;
2372 int i;
2373
2374 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2375 sglist->num_sg, DMA_TO_DEVICE);
2376
2377 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2378 ioarcb->write_data_transfer_length = cpu_to_be32(len);
2379 ioarcb->write_ioadl_len =
2380 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2381
2382 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2383 ioadl[i].flags_and_data_len =
2384 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2385 ioadl[i].address =
2386 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2387 }
2388
2389 if (likely(ipr_cmd->dma_use_sg)) {
2390 ioadl[i-1].flags_and_data_len |=
2391 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2392 }
2393 else {
2394 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2395 return -EIO;
2396 }
2397
2398 return 0;
2399}
2400
2401/**
2402 * ipr_store_update_fw - Update the firmware on the adapter
2403 * @class_dev: class_device struct
2404 * @buf: buffer
2405 * @count: buffer size
2406 *
2407 * This function will update the firmware on the adapter.
2408 *
2409 * Return value:
2410 * count on success / other on failure
2411 **/
2412static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2413 const char *buf, size_t count)
2414{
2415 struct Scsi_Host *shost = class_to_shost(class_dev);
2416 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2417 struct ipr_ucode_image_header *image_hdr;
2418 const struct firmware *fw_entry;
2419 struct ipr_sglist *sglist;
2420 unsigned long lock_flags;
2421 char fname[100];
2422 char *src;
2423 int len, result, dnld_size;
2424
2425 if (!capable(CAP_SYS_ADMIN))
2426 return -EACCES;
2427
2428 len = snprintf(fname, 99, "%s", buf);
2429 fname[len-1] = '\0';
2430
2431 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2432 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2433 return -EIO;
2434 }
2435
2436 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2437
2438 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2439 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2440 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2441 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2442 release_firmware(fw_entry);
2443 return -EINVAL;
2444 }
2445
2446 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2447 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2448 sglist = ipr_alloc_ucode_buffer(dnld_size);
2449
2450 if (!sglist) {
2451 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2452 release_firmware(fw_entry);
2453 return -ENOMEM;
2454 }
2455
2456 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2457
2458 if (result) {
2459 dev_err(&ioa_cfg->pdev->dev,
2460 "Microcode buffer copy to DMA buffer failed\n");
2461 ipr_free_ucode_buffer(sglist);
2462 release_firmware(fw_entry);
2463 return result;
2464 }
2465
2466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2467
2468 if (ioa_cfg->ucode_sglist) {
2469 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2470 dev_err(&ioa_cfg->pdev->dev,
2471 "Microcode download already in progress\n");
2472 ipr_free_ucode_buffer(sglist);
2473 release_firmware(fw_entry);
2474 return -EIO;
2475 }
2476
2477 ioa_cfg->ucode_sglist = sglist;
2478 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2480 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2481
2482 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2483 ioa_cfg->ucode_sglist = NULL;
2484 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2485
2486 ipr_free_ucode_buffer(sglist);
2487 release_firmware(fw_entry);
2488
2489 return count;
2490}
2491
2492static struct class_device_attribute ipr_update_fw_attr = {
2493 .attr = {
2494 .name = "update_fw",
2495 .mode = S_IWUSR,
2496 },
2497 .store = ipr_store_update_fw
2498};
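/*
 * Illustrative usage of the "update_fw" attribute (file name and path are
 * examples, not guaranteed): place the microcode image where
 * request_firmware() can find it (typically under /lib/firmware) and write
 * its name, e.g. `echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw`.
 * The store blocks until the download and adapter reset complete.
 */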
2499
2500static struct class_device_attribute *ipr_ioa_attrs[] = {
2501 &ipr_fw_version_attr,
2502 &ipr_log_level_attr,
2503 &ipr_diagnostics_attr,
2504 &ipr_ioa_reset_attr,
2505 &ipr_update_fw_attr,
62275040 2506 &ipr_ioa_cache_attr,
2507 NULL,
2508};
2509
2510#ifdef CONFIG_SCSI_IPR_DUMP
2511/**
2512 * ipr_read_dump - Dump the adapter
2513 * @kobj: kobject struct
2514 * @buf: buffer
2515 * @off: offset
2516 * @count: buffer size
2517 *
2518 * Return value:
2519 * number of bytes printed to buffer
2520 **/
2521static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2522 loff_t off, size_t count)
2523{
2524 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2525 struct Scsi_Host *shost = class_to_shost(cdev);
2526 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2527 struct ipr_dump *dump;
2528 unsigned long lock_flags = 0;
2529 char *src;
2530 int len;
2531 size_t rc = count;
2532
2533 if (!capable(CAP_SYS_ADMIN))
2534 return -EACCES;
2535
2536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2537 dump = ioa_cfg->dump;
2538
2539 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2540 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2541 return 0;
2542 }
2543 kref_get(&dump->kref);
2544 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2545
2546 if (off > dump->driver_dump.hdr.len) {
2547 kref_put(&dump->kref, ipr_release_dump);
2548 return 0;
2549 }
2550
2551 if (off + count > dump->driver_dump.hdr.len) {
2552 count = dump->driver_dump.hdr.len - off;
2553 rc = count;
2554 }
2555
2556 if (count && off < sizeof(dump->driver_dump)) {
2557 if (off + count > sizeof(dump->driver_dump))
2558 len = sizeof(dump->driver_dump) - off;
2559 else
2560 len = count;
2561 src = (u8 *)&dump->driver_dump + off;
2562 memcpy(buf, src, len);
2563 buf += len;
2564 off += len;
2565 count -= len;
2566 }
2567
2568 off -= sizeof(dump->driver_dump);
2569
2570 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2571 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2572 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2573 else
2574 len = count;
2575 src = (u8 *)&dump->ioa_dump + off;
2576 memcpy(buf, src, len);
2577 buf += len;
2578 off += len;
2579 count -= len;
2580 }
2581
2582 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2583
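	/* Whatever remains of the requested range lives in the page-sized
	 * chunks of ioa_dump.ioa_data; copy it out one page (or less) at
	 * a time. */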
2584 while (count) {
2585 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2586 len = PAGE_ALIGN(off) - off;
2587 else
2588 len = count;
2589 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2590 src += off & ~PAGE_MASK;
2591 memcpy(buf, src, len);
2592 buf += len;
2593 off += len;
2594 count -= len;
2595 }
2596
2597 kref_put(&dump->kref, ipr_release_dump);
2598 return rc;
2599}
2600
2601/**
2602 * ipr_alloc_dump - Prepare for adapter dump
2603 * @ioa_cfg: ioa config struct
2604 *
2605 * Return value:
2606 * 0 on success / other on failure
2607 **/
2608static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2609{
2610 struct ipr_dump *dump;
2611 unsigned long lock_flags = 0;
2612
2613 ENTER;
0bc42e35 2614 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2615
2616 if (!dump) {
2617 ipr_err("Dump memory allocation failed\n");
2618 return -ENOMEM;
2619 }
2620
2621 kref_init(&dump->kref);
2622 dump->ioa_cfg = ioa_cfg;
2623
2624 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2625
2626 if (INACTIVE != ioa_cfg->sdt_state) {
2627 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2628 kfree(dump);
2629 return 0;
2630 }
2631
2632 ioa_cfg->dump = dump;
2633 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2634 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2635 ioa_cfg->dump_taken = 1;
2636 schedule_work(&ioa_cfg->work_q);
2637 }
2638 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2639
2640 LEAVE;
2641 return 0;
2642}
2643
2644/**
2645 * ipr_free_dump - Free adapter dump memory
2646 * @ioa_cfg: ioa config struct
2647 *
2648 * Return value:
2649 * 0 on success / other on failure
2650 **/
2651static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2652{
2653 struct ipr_dump *dump;
2654 unsigned long lock_flags = 0;
2655
2656 ENTER;
2657
2658 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2659 dump = ioa_cfg->dump;
2660 if (!dump) {
2661 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2662 return 0;
2663 }
2664
2665 ioa_cfg->dump = NULL;
2666 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2667
2668 kref_put(&dump->kref, ipr_release_dump);
2669
2670 LEAVE;
2671 return 0;
2672}
2673
2674/**
2675 * ipr_write_dump - Setup dump state of adapter
2676 * @kobj: kobject struct
2677 * @buf: buffer
2678 * @off: offset
2679 * @count: buffer size
2680 *
2681 * Return value:
 2682 * 	count on success / other on failure
2683 **/
2684static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2685 loff_t off, size_t count)
2686{
2687 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2688 struct Scsi_Host *shost = class_to_shost(cdev);
2689 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2690 int rc;
2691
2692 if (!capable(CAP_SYS_ADMIN))
2693 return -EACCES;
2694
2695 if (buf[0] == '1')
2696 rc = ipr_alloc_dump(ioa_cfg);
2697 else if (buf[0] == '0')
2698 rc = ipr_free_dump(ioa_cfg);
2699 else
2700 return -EINVAL;
2701
2702 if (rc)
2703 return rc;
2704 else
2705 return count;
2706}
2707
2708static struct bin_attribute ipr_dump_attr = {
2709 .attr = {
2710 .name = "dump",
2711 .mode = S_IRUSR | S_IWUSR,
2712 },
2713 .size = 0,
2714 .read = ipr_read_dump,
2715 .write = ipr_write_dump
2716};
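/*
 * Illustrative usage of the "dump" attribute (path is an example only):
 *   echo 1 > /sys/class/scsi_host/host0/dump    # allocate and arm a dump
 *   cat /sys/class/scsi_host/host0/dump > ipr.dump
 *   echo 0 > /sys/class/scsi_host/host0/dump    # free the dump memory
 * Reads return data only once the driver has marked the dump DUMP_OBTAINED.
 */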
2717#else
2718static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
2719#endif
2720
2721/**
2722 * ipr_change_queue_depth - Change the device's queue depth
2723 * @sdev: scsi device struct
2724 * @qdepth: depth to set
2725 *
2726 * Return value:
2727 * actual depth set
2728 **/
2729static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2730{
2731 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2732 return sdev->queue_depth;
2733}
2734
2735/**
2736 * ipr_change_queue_type - Change the device's queue type
 2737 * @sdev: scsi device struct
2738 * @tag_type: type of tags to use
2739 *
2740 * Return value:
2741 * actual queue type set
2742 **/
2743static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2744{
2745 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2746 struct ipr_resource_entry *res;
2747 unsigned long lock_flags = 0;
2748
2749 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2750 res = (struct ipr_resource_entry *)sdev->hostdata;
2751
2752 if (res) {
2753 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2754 /*
2755 * We don't bother quiescing the device here since the
2756 * adapter firmware does it for us.
2757 */
2758 scsi_set_tag_type(sdev, tag_type);
2759
2760 if (tag_type)
2761 scsi_activate_tcq(sdev, sdev->queue_depth);
2762 else
2763 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2764 } else
2765 tag_type = 0;
2766 } else
2767 tag_type = 0;
2768
2769 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2770 return tag_type;
2771}
2772
2773/**
2774 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2775 * @dev: device struct
2776 * @buf: buffer
2777 *
2778 * Return value:
2779 * number of bytes printed to buffer
2780 **/
10523b3b 2781static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2782{
2783 struct scsi_device *sdev = to_scsi_device(dev);
2784 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2785 struct ipr_resource_entry *res;
2786 unsigned long lock_flags = 0;
2787 ssize_t len = -ENXIO;
2788
2789 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2790 res = (struct ipr_resource_entry *)sdev->hostdata;
2791 if (res)
2792 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2793 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2794 return len;
2795}
2796
2797static struct device_attribute ipr_adapter_handle_attr = {
2798 .attr = {
2799 .name = "adapter_handle",
2800 .mode = S_IRUSR,
2801 },
2802 .show = ipr_show_adapter_handle
2803};
2804
2805static struct device_attribute *ipr_dev_attrs[] = {
2806 &ipr_adapter_handle_attr,
2807 NULL,
2808};
2809
2810/**
2811 * ipr_biosparam - Return the HSC mapping
2812 * @sdev: scsi device struct
2813 * @block_device: block device pointer
2814 * @capacity: capacity of the device
2815 * @parm: Array containing returned HSC values.
2816 *
2817 * This function generates the HSC parms that fdisk uses.
2818 * We want to make sure we return something that places partitions
2819 * on 4k boundaries for best performance with the IOA.
2820 *
2821 * Return value:
2822 * 0 on success
2823 **/
2824static int ipr_biosparam(struct scsi_device *sdev,
2825 struct block_device *block_device,
2826 sector_t capacity, int *parm)
2827{
2828 int heads, sectors;
2829 sector_t cylinders;
2830
2831 heads = 128;
2832 sectors = 32;
2833
2834 cylinders = capacity;
2835 sector_div(cylinders, (128 * 32));
2836
2837 /* return result */
2838 parm[0] = heads;
2839 parm[1] = sectors;
2840 parm[2] = cylinders;
2841
2842 return 0;
2843}
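/*
 * Worked example of the mapping above (capacity is illustrative): a disk
 * reporting 71687372 sectors is presented as heads=128, sectors=32 and
 * cylinders = 71687372 / (128 * 32) = 17501, which keeps partitioning
 * tools on 4K-aligned boundaries.
 */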
2844
2845/**
2846 * ipr_slave_destroy - Unconfigure a SCSI device
2847 * @sdev: scsi device struct
2848 *
2849 * Return value:
2850 * nothing
2851 **/
2852static void ipr_slave_destroy(struct scsi_device *sdev)
2853{
2854 struct ipr_resource_entry *res;
2855 struct ipr_ioa_cfg *ioa_cfg;
2856 unsigned long lock_flags = 0;
2857
2858 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2859
2860 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2861 res = (struct ipr_resource_entry *) sdev->hostdata;
2862 if (res) {
2863 sdev->hostdata = NULL;
2864 res->sdev = NULL;
2865 }
2866 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2867}
2868
2869/**
2870 * ipr_slave_configure - Configure a SCSI device
2871 * @sdev: scsi device struct
2872 *
2873 * This function configures the specified scsi device.
2874 *
2875 * Return value:
2876 * 0 on success
2877 **/
2878static int ipr_slave_configure(struct scsi_device *sdev)
2879{
2880 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2881 struct ipr_resource_entry *res;
2882 unsigned long lock_flags = 0;
2883
2884 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2885 res = sdev->hostdata;
2886 if (res) {
2887 if (ipr_is_af_dasd_device(res))
2888 sdev->type = TYPE_RAID;
0726ce26 2889 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 2890 			sdev->scsi_level = 4;
 2891 			sdev->no_uld_attach = 1;
 2892 		}
2893 if (ipr_is_vset_device(res)) {
2894 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2895 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2896 }
2897 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2898 sdev->allow_restart = 1;
2899 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2900 }
2901 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2902 return 0;
2903}
2904
2905/**
2906 * ipr_slave_alloc - Prepare for commands to a device.
2907 * @sdev: scsi device struct
2908 *
2909 * This function saves a pointer to the resource entry
2910 * in the scsi device struct if the device exists. We
2911 * can then use this pointer in ipr_queuecommand when
2912 * handling new commands.
2913 *
2914 * Return value:
692aebfc 2915 * 0 on success / -ENXIO if device does not exist
2916 **/
2917static int ipr_slave_alloc(struct scsi_device *sdev)
2918{
2919 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2920 struct ipr_resource_entry *res;
2921 unsigned long lock_flags;
692aebfc 2922 int rc = -ENXIO;
2923
2924 sdev->hostdata = NULL;
2925
2926 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2927
2928 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2929 if ((res->cfgte.res_addr.bus == sdev->channel) &&
2930 (res->cfgte.res_addr.target == sdev->id) &&
2931 (res->cfgte.res_addr.lun == sdev->lun)) {
2932 res->sdev = sdev;
2933 res->add_to_ml = 0;
2934 res->in_erp = 0;
2935 sdev->hostdata = res;
2936 res->needs_sync_complete = 1;
692aebfc 2937 rc = 0;
2938 break;
2939 }
2940 }
2941
2942 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2943
692aebfc 2944 return rc;
2945}
2946
2947/**
2948 * ipr_eh_host_reset - Reset the host adapter
2949 * @scsi_cmd: scsi command struct
2950 *
2951 * Return value:
2952 * SUCCESS / FAILED
2953 **/
df0ae249 2954static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2955{
2956 struct ipr_ioa_cfg *ioa_cfg;
2957 int rc;
2958
2959 ENTER;
2960 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2961
2962 dev_err(&ioa_cfg->pdev->dev,
2963 "Adapter being reset as a result of error recovery.\n");
2964
2965 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2966 ioa_cfg->sdt_state = GET_DUMP;
2967
2968 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2969
2970 LEAVE;
2971 return rc;
2972}
2973
2974static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
2975{
2976 int rc;
2977
2978 spin_lock_irq(cmd->device->host->host_lock);
2979 rc = __ipr_eh_host_reset(cmd);
2980 spin_unlock_irq(cmd->device->host->host_lock);
2981
2982 return rc;
2983}
2984
2985/**
2986 * ipr_eh_dev_reset - Reset the device
2987 * @scsi_cmd: scsi command struct
2988 *
2989 * This function issues a device reset to the affected device.
2990 * A LUN reset will be sent to the device first. If that does
2991 * not work, a target reset will be sent.
2992 *
2993 * Return value:
2994 * SUCCESS / FAILED
2995 **/
94d0e7b8 2996static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2997{
2998 struct ipr_cmnd *ipr_cmd;
2999 struct ipr_ioa_cfg *ioa_cfg;
3000 struct ipr_resource_entry *res;
3001 struct ipr_cmd_pkt *cmd_pkt;
3002 u32 ioasc;
3003
3004 ENTER;
3005 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3006 res = scsi_cmd->device->hostdata;
3007
3008 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3009 return FAILED;
3010
3011 /*
3012 * If we are currently going through reset/reload, return failed. This will force the
3013 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3014 * reset to complete
3015 */
3016 if (ioa_cfg->in_reset_reload)
3017 return FAILED;
3018 if (ioa_cfg->ioa_is_dead)
3019 return FAILED;
3020
3021 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3022 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3023 if (ipr_cmd->scsi_cmd)
3024 ipr_cmd->done = ipr_scsi_eh_done;
3025 }
3026 }
3027
3028 res->resetting_device = 1;
3029
3030 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3031
3032 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3033 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3034 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3035 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3036
3037 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3038 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3039
3040 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3041
3042 res->resetting_device = 0;
3043
3044 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3045
3046 LEAVE;
3047 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3048}
3049
3050static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3051{
3052 int rc;
3053
3054 spin_lock_irq(cmd->device->host->host_lock);
3055 rc = __ipr_eh_dev_reset(cmd);
3056 spin_unlock_irq(cmd->device->host->host_lock);
3057
3058 return rc;
3059}
3060
3061/**
3062 * ipr_bus_reset_done - Op done function for bus reset.
3063 * @ipr_cmd: ipr command struct
3064 *
3065 * This function is the op done function for a bus reset
3066 *
3067 * Return value:
3068 * none
3069 **/
3070static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3071{
3072 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3073 struct ipr_resource_entry *res;
3074
3075 ENTER;
3076 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3077 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3078 sizeof(res->cfgte.res_handle))) {
3079 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3080 break;
3081 }
3082 }
3083
3084 /*
3085 * If abort has not completed, indicate the reset has, else call the
3086 * abort's done function to wake the sleeping eh thread
3087 */
3088 if (ipr_cmd->sibling->sibling)
3089 ipr_cmd->sibling->sibling = NULL;
3090 else
3091 ipr_cmd->sibling->done(ipr_cmd->sibling);
3092
3093 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3094 LEAVE;
3095}
3096
3097/**
3098 * ipr_abort_timeout - An abort task has timed out
3099 * @ipr_cmd: ipr command struct
3100 *
3101 * This function handles when an abort task times out. If this
3102 * happens we issue a bus reset since we have resources tied
3103 * up that must be freed before returning to the midlayer.
3104 *
3105 * Return value:
3106 * none
3107 **/
3108static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3109{
3110 struct ipr_cmnd *reset_cmd;
3111 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3112 struct ipr_cmd_pkt *cmd_pkt;
3113 unsigned long lock_flags = 0;
3114
3115 ENTER;
3116 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3117 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3118 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3119 return;
3120 }
3121
3122 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3123 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3124 ipr_cmd->sibling = reset_cmd;
3125 reset_cmd->sibling = ipr_cmd;
3126 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3127 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3128 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3129 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3130 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3131
3132 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3134 LEAVE;
3135}
3136
3137/**
3138 * ipr_cancel_op - Cancel specified op
3139 * @scsi_cmd: scsi command struct
3140 *
3141 * This function cancels specified op.
3142 *
3143 * Return value:
3144 * SUCCESS / FAILED
3145 **/
3146static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3147{
3148 struct ipr_cmnd *ipr_cmd;
3149 struct ipr_ioa_cfg *ioa_cfg;
3150 struct ipr_resource_entry *res;
3151 struct ipr_cmd_pkt *cmd_pkt;
3152 u32 ioasc;
3153 int op_found = 0;
3154
3155 ENTER;
3156 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3157 res = scsi_cmd->device->hostdata;
3158
3159 /* If we are currently going through reset/reload, return failed.
3160 * This will force the mid-layer to call ipr_eh_host_reset,
3161 * which will then go to sleep and wait for the reset to complete
3162 */
3163 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3164 return FAILED;
3165 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3166 return FAILED;
3167
3168 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3169 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3170 ipr_cmd->done = ipr_scsi_eh_done;
3171 op_found = 1;
3172 break;
3173 }
3174 }
3175
3176 if (!op_found)
3177 return SUCCESS;
3178
3179 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3180 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3181 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3182 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3183 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3184 ipr_cmd->u.sdev = scsi_cmd->device;
3185
3186 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3187 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3188 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3189
3190 /*
3191 * If the abort task timed out and we sent a bus reset, we will get
 3192 * one of the following responses to the abort
3193 */
3194 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3195 ioasc = 0;
3196 ipr_trace;
3197 }
3198
3199 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3200 res->needs_sync_complete = 1;
3201
3202 LEAVE;
3203 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3204}
3205
3206/**
3207 * ipr_eh_abort - Abort a single op
3208 * @scsi_cmd: scsi command struct
3209 *
3210 * Return value:
3211 * SUCCESS / FAILED
3212 **/
3213static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3214{
3215 unsigned long flags;
3216 int rc;
3217
3218 ENTER;
1da177e4 3219
3220 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3221 rc = ipr_cancel_op(scsi_cmd);
3222 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3223
3224 LEAVE;
8fa728a2 3225 return rc;
3226}
3227
3228/**
3229 * ipr_handle_other_interrupt - Handle "other" interrupts
3230 * @ioa_cfg: ioa config struct
3231 * @int_reg: interrupt register
3232 *
3233 * Return value:
3234 * IRQ_NONE / IRQ_HANDLED
3235 **/
3236static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3237 volatile u32 int_reg)
3238{
3239 irqreturn_t rc = IRQ_HANDLED;
3240
3241 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3242 /* Mask the interrupt */
3243 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3244
3245 /* Clear the interrupt */
3246 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3247 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3248
3249 list_del(&ioa_cfg->reset_cmd->queue);
3250 del_timer(&ioa_cfg->reset_cmd->timer);
3251 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3252 } else {
3253 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3254 ioa_cfg->ioa_unit_checked = 1;
3255 else
3256 dev_err(&ioa_cfg->pdev->dev,
3257 "Permanent IOA failure. 0x%08X\n", int_reg);
3258
3259 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3260 ioa_cfg->sdt_state = GET_DUMP;
3261
3262 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3263 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3264 }
3265
3266 return rc;
3267}
3268
3269/**
3270 * ipr_isr - Interrupt service routine
3271 * @irq: irq number
3272 * @devp: pointer to ioa config struct
3273 * @regs: pt_regs struct
3274 *
3275 * Return value:
3276 * IRQ_NONE / IRQ_HANDLED
3277 **/
3278static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3279{
3280 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3281 unsigned long lock_flags = 0;
3282 volatile u32 int_reg, int_mask_reg;
3283 u32 ioasc;
3284 u16 cmd_index;
3285 struct ipr_cmnd *ipr_cmd;
3286 irqreturn_t rc = IRQ_NONE;
3287
3288 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3289
3290 /* If interrupts are disabled, ignore the interrupt */
3291 if (!ioa_cfg->allow_interrupts) {
3292 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3293 return IRQ_NONE;
3294 }
3295
3296 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3297 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3298
3299 /* If an interrupt on the adapter did not occur, ignore it */
3300 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3301 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302 return IRQ_NONE;
3303 }
3304
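	/* Drain the host RRQ: consume completed responses until the toggle
	 * bit indicates no new entries, then clear the HRRQ-updated interrupt
	 * and re-check for responses that raced in before giving up. */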
3305 while (1) {
3306 ipr_cmd = NULL;
3307
3308 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3309 ioa_cfg->toggle_bit) {
3310
3311 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3312 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3313
3314 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3315 ioa_cfg->errors_logged++;
3316 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3317
3318 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3319 ioa_cfg->sdt_state = GET_DUMP;
3320
3321 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3322 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3323 return IRQ_HANDLED;
3324 }
3325
3326 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3327
3328 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3329
3330 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3331
3332 list_del(&ipr_cmd->queue);
3333 del_timer(&ipr_cmd->timer);
3334 ipr_cmd->done(ipr_cmd);
3335
3336 rc = IRQ_HANDLED;
3337
3338 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3339 ioa_cfg->hrrq_curr++;
3340 } else {
3341 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3342 ioa_cfg->toggle_bit ^= 1u;
3343 }
3344 }
3345
3346 if (ipr_cmd != NULL) {
3347 /* Clear the PCI interrupt */
3348 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3349 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3350 } else
3351 break;
3352 }
3353
3354 if (unlikely(rc == IRQ_NONE))
3355 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3356
3357 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3358 return rc;
3359}
3360
3361/**
3362 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3363 * @ioa_cfg: ioa config struct
3364 * @ipr_cmd: ipr command struct
3365 *
3366 * Return value:
3367 * 0 on success / -1 on failure
3368 **/
3369static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3370 struct ipr_cmnd *ipr_cmd)
3371{
3372 int i;
3373 struct scatterlist *sglist;
3374 u32 length;
3375 u32 ioadl_flags = 0;
3376 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3377 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3378 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3379
3380 length = scsi_cmd->request_bufflen;
3381
3382 if (length == 0)
3383 return 0;
3384
3385 if (scsi_cmd->use_sg) {
3386 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3387 scsi_cmd->request_buffer,
3388 scsi_cmd->use_sg,
3389 scsi_cmd->sc_data_direction);
3390
3391 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3392 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3393 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3394 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3395 ioarcb->write_ioadl_len =
3396 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3397 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3398 ioadl_flags = IPR_IOADL_FLAGS_READ;
3399 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3400 ioarcb->read_ioadl_len =
3401 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3402 }
3403
3404 sglist = scsi_cmd->request_buffer;
3405
3406 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3407 ioadl[i].flags_and_data_len =
3408 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3409 ioadl[i].address =
3410 cpu_to_be32(sg_dma_address(&sglist[i]));
3411 }
3412
3413 if (likely(ipr_cmd->dma_use_sg)) {
3414 ioadl[i-1].flags_and_data_len |=
3415 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3416 return 0;
3417 } else
3418 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3419 } else {
3420 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3421 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3422 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3423 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3424 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3425 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3426 ioadl_flags = IPR_IOADL_FLAGS_READ;
3427 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3428 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3429 }
3430
3431 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3432 scsi_cmd->request_buffer, length,
3433 scsi_cmd->sc_data_direction);
3434
3435 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3436 ipr_cmd->dma_use_sg = 1;
3437 ioadl[0].flags_and_data_len =
3438 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3439 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3440 return 0;
3441 } else
3442 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3443 }
3444
3445 return -1;
3446}
3447
3448/**
3449 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3450 * @scsi_cmd: scsi command struct
3451 *
3452 * Return value:
3453 * task attributes
3454 **/
3455static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3456{
3457 u8 tag[2];
3458 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3459
3460 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3461 switch (tag[0]) {
3462 case MSG_SIMPLE_TAG:
3463 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3464 break;
3465 case MSG_HEAD_TAG:
3466 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3467 break;
3468 case MSG_ORDERED_TAG:
3469 rc = IPR_FLAGS_LO_ORDERED_TASK;
3470 break;
3471 };
3472 }
3473
3474 return rc;
3475}
3476
3477/**
3478 * ipr_erp_done - Process completion of ERP for a device
3479 * @ipr_cmd: ipr command struct
3480 *
3481 * This function copies the sense buffer into the scsi_cmd
3482 * struct and pushes the scsi_done function.
3483 *
3484 * Return value:
3485 * nothing
3486 **/
3487static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3488{
3489 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3490 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3492 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3493
3494 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3495 scsi_cmd->result |= (DID_ERROR << 16);
3496 ipr_sdev_err(scsi_cmd->device,
3497 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3498 } else {
3499 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3500 SCSI_SENSE_BUFFERSIZE);
3501 }
3502
3503 if (res) {
3504 res->needs_sync_complete = 1;
3505 res->in_erp = 0;
3506 }
3507 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3508 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3509 scsi_cmd->scsi_done(scsi_cmd);
3510}
3511
3512/**
3513 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3514 * @ipr_cmd: ipr command struct
3515 *
3516 * Return value:
3517 * none
3518 **/
3519static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3520{
3521 struct ipr_ioarcb *ioarcb;
3522 struct ipr_ioasa *ioasa;
3523
3524 ioarcb = &ipr_cmd->ioarcb;
3525 ioasa = &ipr_cmd->ioasa;
3526
3527 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3528 ioarcb->write_data_transfer_length = 0;
3529 ioarcb->read_data_transfer_length = 0;
3530 ioarcb->write_ioadl_len = 0;
3531 ioarcb->read_ioadl_len = 0;
3532 ioasa->ioasc = 0;
3533 ioasa->residual_data_len = 0;
3534}
3535
3536/**
3537 * ipr_erp_request_sense - Send request sense to a device
3538 * @ipr_cmd: ipr command struct
3539 *
3540 * This function sends a request sense to a device as a result
3541 * of a check condition.
3542 *
3543 * Return value:
3544 * nothing
3545 **/
3546static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3547{
3548 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3549 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3550
3551 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3552 ipr_erp_done(ipr_cmd);
3553 return;
3554 }
3555
3556 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3557
3558 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3559 cmd_pkt->cdb[0] = REQUEST_SENSE;
3560 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3561 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3562 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3563 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3564
3565 ipr_cmd->ioadl[0].flags_and_data_len =
3566 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3567 ipr_cmd->ioadl[0].address =
3568 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3569
3570 ipr_cmd->ioarcb.read_ioadl_len =
3571 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3572 ipr_cmd->ioarcb.read_data_transfer_length =
3573 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3574
3575 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3576 IPR_REQUEST_SENSE_TIMEOUT * 2);
3577}
3578
3579/**
3580 * ipr_erp_cancel_all - Send cancel all to a device
3581 * @ipr_cmd: ipr command struct
3582 *
3583 * This function sends a cancel all to a device to clear the
3584 * queue. If we are running TCQ on the device, QERR is set to 1,
3585 * which means all outstanding ops have been dropped on the floor.
3586 * Cancel all will return them to us.
3587 *
3588 * Return value:
3589 * nothing
3590 **/
3591static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3592{
3593 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3594 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3595 struct ipr_cmd_pkt *cmd_pkt;
3596
3597 res->in_erp = 1;
3598
3599 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3600
3601 if (!scsi_get_tag_type(scsi_cmd->device)) {
3602 ipr_erp_request_sense(ipr_cmd);
3603 return;
3604 }
3605
3606 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3607 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3608 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3609
3610 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3611 IPR_CANCEL_ALL_TIMEOUT);
3612}
3613
3614/**
3615 * ipr_dump_ioasa - Dump contents of IOASA
3616 * @ioa_cfg: ioa config struct
3617 * @ipr_cmd: ipr command struct
3618 *
3619 * This function is invoked by the interrupt handler when ops
3620 * fail. It will log the IOASA if appropriate. Only called
3621 * for GPDD ops.
3622 *
3623 * Return value:
3624 * none
3625 **/
3626static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3627 struct ipr_cmnd *ipr_cmd)
3628{
3629 int i;
3630 u16 data_len;
3631 u32 ioasc;
3632 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3633 __be32 *ioasa_data = (__be32 *)ioasa;
3634 int error_index;
3635
3636 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3637
3638 if (0 == ioasc)
3639 return;
3640
3641 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3642 return;
3643
3644 error_index = ipr_get_error(ioasc);
3645
3646 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3647 /* Don't log an error if the IOA already logged one */
3648 if (ioasa->ilid != 0)
3649 return;
3650
3651 if (ipr_error_table[error_index].log_ioasa == 0)
3652 return;
3653 }
3654
3655 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3656 ipr_error_table[error_index].error);
3657
3658 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3659 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3660 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3661 "Device End state: %s Phase: %s\n",
3662 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3663 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3664 }
3665
3666 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3667 data_len = sizeof(struct ipr_ioasa);
3668 else
3669 data_len = be16_to_cpu(ioasa->ret_stat_len);
3670
3671 ipr_err("IOASA Dump:\n");
3672
3673 for (i = 0; i < data_len / 4; i += 4) {
3674 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3675 be32_to_cpu(ioasa_data[i]),
3676 be32_to_cpu(ioasa_data[i+1]),
3677 be32_to_cpu(ioasa_data[i+2]),
3678 be32_to_cpu(ioasa_data[i+3]));
3679 }
3680}
3681
3682/**
3683 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 3684 * @ipr_cmd: ipr command struct containing the IOASA and the
 3685 * scsi_cmd whose sense buffer will be filled in
3686 *
3687 * Return value:
3688 * none
3689 **/
3690static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3691{
3692 u32 failing_lba;
3693 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3694 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3695 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3696 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3697
3698 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3699
3700 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3701 return;
3702
3703 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3704
3705 if (ipr_is_vset_device(res) &&
3706 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3707 ioasa->u.vset.failing_lba_hi != 0) {
3708 sense_buf[0] = 0x72;
3709 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3710 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3711 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3712
3713 sense_buf[7] = 12;
3714 sense_buf[8] = 0;
3715 sense_buf[9] = 0x0A;
3716 sense_buf[10] = 0x80;
3717
3718 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3719
3720 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3721 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3722 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3723 sense_buf[15] = failing_lba & 0x000000ff;
3724
3725 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3726
3727 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3728 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3729 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3730 sense_buf[19] = failing_lba & 0x000000ff;
3731 } else {
3732 sense_buf[0] = 0x70;
3733 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3734 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3735 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3736
3737 /* Illegal request */
3738 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3739 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3740 sense_buf[7] = 10; /* additional length */
3741
3742 /* IOARCB was in error */
3743 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3744 sense_buf[15] = 0xC0;
3745 else /* Parameter data was invalid */
3746 sense_buf[15] = 0x80;
3747
3748 sense_buf[16] =
3749 ((IPR_FIELD_POINTER_MASK &
3750 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3751 sense_buf[17] =
3752 (IPR_FIELD_POINTER_MASK &
3753 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3754 } else {
3755 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3756 if (ipr_is_vset_device(res))
3757 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3758 else
3759 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3760
3761 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3762 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3763 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3764 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3765 sense_buf[6] = failing_lba & 0x000000ff;
3766 }
3767
3768 sense_buf[7] = 6; /* additional length */
3769 }
3770 }
3771}
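
/*
 * Editor's sketch, not part of the driver: the two LBA encodings built
 * above, pulled into stand-alone helpers. The fixed-format (0x70) path
 * stores a 32-bit LBA in the information field; the descriptor-format
 * (0x72) path is used for vsets because the failing LBA can exceed
 * 32 bits. Helper names are hypothetical.
 */
static void pack_fixed_sense_lba(u8 *sense_buf, u32 lba)
{
	sense_buf[0] |= 0x80;			/* information field valid */
	sense_buf[3] = (lba >> 24) & 0xff;
	sense_buf[4] = (lba >> 16) & 0xff;
	sense_buf[5] = (lba >> 8) & 0xff;
	sense_buf[6] = lba & 0xff;
}

static void pack_desc_sense_lba(u8 *sense_buf, u32 lba_hi, u32 lba_lo)
{
	int i;

	sense_buf[7] = 12;			/* additional sense length */
	sense_buf[8] = 0;			/* information descriptor type */
	sense_buf[9] = 0x0A;			/* descriptor length */
	sense_buf[10] = 0x80;			/* VALID bit */
	for (i = 0; i < 4; i++) {
		sense_buf[12 + i] = (lba_hi >> (24 - 8 * i)) & 0xff;
		sense_buf[16 + i] = (lba_lo >> (24 - 8 * i)) & 0xff;
	}
}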
3772
3773/**
3774 * ipr_erp_start - Process an error response for a SCSI op
3775 * @ioa_cfg: ioa config struct
3776 * @ipr_cmd: ipr command struct
3777 *
3778 * This function determines whether or not to initiate ERP
3779 * on the affected device.
3780 *
3781 * Return value:
3782 * nothing
3783 **/
3784static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3785 struct ipr_cmnd *ipr_cmd)
3786{
3787 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3788 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3789 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3790
3791 if (!res) {
3792 ipr_scsi_eh_done(ipr_cmd);
3793 return;
3794 }
3795
3796 if (ipr_is_gscsi(res))
3797 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3798 else
3799 ipr_gen_sense(ipr_cmd);
3800
3801 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3802 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3803 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3804 break;
3805 case IPR_IOASC_IR_RESOURCE_HANDLE:
3806 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3807 break;
3808 case IPR_IOASC_HW_SEL_TIMEOUT:
3809 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3810 res->needs_sync_complete = 1;
3811 break;
3812 case IPR_IOASC_SYNC_REQUIRED:
3813 if (!res->in_erp)
3814 res->needs_sync_complete = 1;
3815 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3816 break;
3817 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3818 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3819 break;
3820 case IPR_IOASC_BUS_WAS_RESET:
3821 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3822 /*
3823 * Report the bus reset and ask for a retry. The device
3824 * will give CC/UA the next command.
3825 */
3826 if (!res->resetting_device)
3827 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3828 scsi_cmd->result |= (DID_ERROR << 16);
3829 res->needs_sync_complete = 1;
3830 break;
3831 case IPR_IOASC_HW_DEV_BUS_STATUS:
3832 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3833 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3834 ipr_erp_cancel_all(ipr_cmd);
3835 return;
3836 }
3837 res->needs_sync_complete = 1;
3838 break;
3839 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3840 break;
3841 default:
3842 scsi_cmd->result |= (DID_ERROR << 16);
3843 if (!ipr_is_vset_device(res))
3844 res->needs_sync_complete = 1;
3845 break;
3846 }
3847
3848 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3849 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3850 scsi_cmd->scsi_done(scsi_cmd);
3851}
3852
3853/**
3854 * ipr_scsi_done - mid-layer done function
3855 * @ipr_cmd: ipr command struct
3856 *
3857 * This function is invoked by the interrupt handler for
3858 * ops generated by the SCSI mid-layer
3859 *
3860 * Return value:
3861 * none
3862 **/
3863static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3864{
3865 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3866 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3867 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3868
3869 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3870
3871 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3872 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3873 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3874 scsi_cmd->scsi_done(scsi_cmd);
3875 } else
3876 ipr_erp_start(ioa_cfg, ipr_cmd);
3877}
3878
3879/**
 3880 * ipr_save_ioafp_mode_select - Save adapter's mode select data
3881 * @ioa_cfg: ioa config struct
3882 * @scsi_cmd: scsi command struct
3883 *
3884 * This function saves mode select data for the adapter to
3885 * use following an adapter reset.
3886 *
3887 * Return value:
3888 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3889 **/
3890static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3891 struct scsi_cmnd *scsi_cmd)
3892{
3893 if (!ioa_cfg->saved_mode_pages) {
3894 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3895 GFP_ATOMIC);
3896 if (!ioa_cfg->saved_mode_pages) {
3897 dev_err(&ioa_cfg->pdev->dev,
3898 "IOA mode select buffer allocation failed\n");
3899 return SCSI_MLQUEUE_HOST_BUSY;
3900 }
3901 }
3902
3903 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3904 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3905 return 0;
3906}
3907
3908/**
3909 * ipr_queuecommand - Queue a mid-layer request
3910 * @scsi_cmd: scsi command struct
3911 * @done: done function
3912 *
3913 * This function queues a request generated by the mid-layer.
3914 *
3915 * Return value:
3916 * 0 on success
3917 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3918 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3919 **/
3920static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3921 void (*done) (struct scsi_cmnd *))
3922{
3923 struct ipr_ioa_cfg *ioa_cfg;
3924 struct ipr_resource_entry *res;
3925 struct ipr_ioarcb *ioarcb;
3926 struct ipr_cmnd *ipr_cmd;
3927 int rc = 0;
3928
3929 scsi_cmd->scsi_done = done;
3930 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3931 res = scsi_cmd->device->hostdata;
3932 scsi_cmd->result = (DID_OK << 16);
3933
3934 /*
3935 * We are currently blocking all devices due to a host reset
3936 * We have told the host to stop giving us new requests, but
3937 * ERP ops don't count. FIXME
3938 */
3939 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3940 return SCSI_MLQUEUE_HOST_BUSY;
3941
3942 /*
3943 * FIXME - Create scsi_set_host_offline interface
3944 * and the ioa_is_dead check can be removed
3945 */
3946 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
3947 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3948 scsi_cmd->result = (DID_NO_CONNECT << 16);
3949 scsi_cmd->scsi_done(scsi_cmd);
3950 return 0;
3951 }
3952
3953 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3954 ioarcb = &ipr_cmd->ioarcb;
3955 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3956
3957 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3958 ipr_cmd->scsi_cmd = scsi_cmd;
3959 ioarcb->res_handle = res->cfgte.res_handle;
3960 ipr_cmd->done = ipr_scsi_done;
3961 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3962
3963 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
3964 if (scsi_cmd->underflow == 0)
3965 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3966
3967 if (res->needs_sync_complete) {
3968 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3969 res->needs_sync_complete = 0;
3970 }
3971
3972 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3973 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3974 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3975 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
3976 }
3977
3978 if (scsi_cmd->cmnd[0] >= 0xC0 &&
3979 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
3980 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3981
3982 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3983 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3984
3985 if (likely(rc == 0))
3986 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3987
3988 if (likely(rc == 0)) {
3989 mb();
3990 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
3991 ioa_cfg->regs.ioarrin_reg);
3992 } else {
3993 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3994 return SCSI_MLQUEUE_HOST_BUSY;
3995 }
3996
3997 return 0;
3998}
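
/*
 * Editor's sketch, not part of the driver: the submit ordering used at
 * the end of ipr_queuecommand. The descriptor (IOARCB/IOADL) is built in
 * normal memory, mb() orders those stores ahead of the MMIO doorbell
 * write, and only then is the adapter told where to fetch the command.
 */
static void example_submit(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	/* descriptor fully built in memory at this point */
	mb();					/* order stores before MMIO */
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);	/* ring the IOARRIN doorbell */
}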
3999
4000/**
 4001 * ipr_ioa_info - Get information about the card/driver
 4002 * @host: scsi host struct
4003 *
4004 * Return value:
4005 * pointer to buffer with description string
4006 **/
4007static const char * ipr_ioa_info(struct Scsi_Host *host)
4008{
4009 static char buffer[512];
4010 struct ipr_ioa_cfg *ioa_cfg;
4011 unsigned long lock_flags = 0;
4012
4013 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4014
4015 spin_lock_irqsave(host->host_lock, lock_flags);
4016 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4017 spin_unlock_irqrestore(host->host_lock, lock_flags);
4018
4019 return buffer;
4020}
4021
4022static struct scsi_host_template driver_template = {
4023 .module = THIS_MODULE,
4024 .name = "IPR",
4025 .info = ipr_ioa_info,
4026 .queuecommand = ipr_queuecommand,
4027 .eh_abort_handler = ipr_eh_abort,
4028 .eh_device_reset_handler = ipr_eh_dev_reset,
4029 .eh_host_reset_handler = ipr_eh_host_reset,
4030 .slave_alloc = ipr_slave_alloc,
4031 .slave_configure = ipr_slave_configure,
4032 .slave_destroy = ipr_slave_destroy,
4033 .change_queue_depth = ipr_change_queue_depth,
4034 .change_queue_type = ipr_change_queue_type,
4035 .bios_param = ipr_biosparam,
4036 .can_queue = IPR_MAX_COMMANDS,
4037 .this_id = -1,
4038 .sg_tablesize = IPR_MAX_SGLIST,
4039 .max_sectors = IPR_IOA_MAX_SECTORS,
4040 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4041 .use_clustering = ENABLE_CLUSTERING,
4042 .shost_attrs = ipr_ioa_attrs,
4043 .sdev_attrs = ipr_dev_attrs,
4044 .proc_name = IPR_NAME
4045};
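
/*
 * Editor's sketch, not the driver's probe code: a template like the one
 * above is typically handed to the SCSI mid-layer along these lines.
 * The function name is hypothetical; the mid-layer calls are standard.
 */
static int example_register_host(struct pci_dev *pdev)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
	if (!host)
		return -ENOMEM;

	/* ... initialize host->hostdata, map registers, request the IRQ ... */

	if (scsi_add_host(host, &pdev->dev)) {
		scsi_host_put(host);
		return -EIO;
	}

	scsi_scan_host(host);
	return 0;
}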
4046
4047#ifdef CONFIG_PPC_PSERIES
4048static const u16 ipr_blocked_processors[] = {
4049 PV_NORTHSTAR,
4050 PV_PULSAR,
4051 PV_POWER4,
4052 PV_ICESTAR,
4053 PV_SSTAR,
4054 PV_POWER4p,
4055 PV_630,
4056 PV_630p
4057};
4058
4059/**
4060 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4061 * @ioa_cfg: ioa cfg struct
4062 *
4063 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4064 * certain pSeries hardware. This function determines if the given
 4065 * adapter is in one of these configurations or not.
4066 *
4067 * Return value:
4068 * 1 if adapter is not supported / 0 if adapter is supported
4069 **/
4070static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4071{
4072 u8 rev_id;
4073 int i;
4074
4075 if (ioa_cfg->type == 0x5702) {
4076 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4077 &rev_id) == PCIBIOS_SUCCESSFUL) {
4078 if (rev_id < 4) {
4079 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4080 if (__is_processor(ipr_blocked_processors[i]))
4081 return 1;
4082 }
4083 }
4084 }
4085 }
4086 return 0;
4087}
4088#else
4089#define ipr_invalid_adapter(ioa_cfg) 0
4090#endif
4091
4092/**
4093 * ipr_ioa_bringdown_done - IOA bring down completion.
4094 * @ipr_cmd: ipr command struct
4095 *
4096 * This function processes the completion of an adapter bring down.
4097 * It wakes any reset sleepers.
4098 *
4099 * Return value:
4100 * IPR_RC_JOB_RETURN
4101 **/
4102static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4103{
4104 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4105
4106 ENTER;
4107 ioa_cfg->in_reset_reload = 0;
4108 ioa_cfg->reset_retries = 0;
4109 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4110 wake_up_all(&ioa_cfg->reset_wait_q);
4111
4112 spin_unlock_irq(ioa_cfg->host->host_lock);
4113 scsi_unblock_requests(ioa_cfg->host);
4114 spin_lock_irq(ioa_cfg->host->host_lock);
4115 LEAVE;
4116
4117 return IPR_RC_JOB_RETURN;
4118}
4119
4120/**
4121 * ipr_ioa_reset_done - IOA reset completion.
4122 * @ipr_cmd: ipr command struct
4123 *
4124 * This function processes the completion of an adapter reset.
4125 * It schedules any necessary mid-layer add/removes and
4126 * wakes any reset sleepers.
4127 *
4128 * Return value:
4129 * IPR_RC_JOB_RETURN
4130 **/
4131static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4132{
4133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4134 struct ipr_resource_entry *res;
4135 struct ipr_hostrcb *hostrcb, *temp;
4136 int i = 0;
4137
4138 ENTER;
4139 ioa_cfg->in_reset_reload = 0;
4140 ioa_cfg->allow_cmds = 1;
4141 ioa_cfg->reset_cmd = NULL;
4142
4143 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4144 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4145 ipr_trace;
4146 break;
4147 }
4148 }
4149 schedule_work(&ioa_cfg->work_q);
4150
4151 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4152 list_del(&hostrcb->queue);
4153 if (i++ < IPR_NUM_LOG_HCAMS)
4154 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4155 else
4156 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4157 }
4158
4159 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4160
4161 ioa_cfg->reset_retries = 0;
4162 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4163 wake_up_all(&ioa_cfg->reset_wait_q);
4164
4165 spin_unlock_irq(ioa_cfg->host->host_lock);
4166 scsi_unblock_requests(ioa_cfg->host);
4167 spin_lock_irq(ioa_cfg->host->host_lock);
4168
4169 if (!ioa_cfg->allow_cmds)
4170 scsi_block_requests(ioa_cfg->host);
4171
4172 LEAVE;
4173 return IPR_RC_JOB_RETURN;
4174}
4175
4176/**
4177 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4178 * @supported_dev: supported device struct
4179 * @vpids: vendor product id struct
4180 *
4181 * Return value:
4182 * none
4183 **/
4184static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4185 struct ipr_std_inq_vpids *vpids)
4186{
4187 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4188 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4189 supported_dev->num_records = 1;
4190 supported_dev->data_length =
4191 cpu_to_be16(sizeof(struct ipr_supported_device));
4192 supported_dev->reserved = 0;
4193}
4194
4195/**
4196 * ipr_set_supported_devs - Send Set Supported Devices for a device
4197 * @ipr_cmd: ipr command struct
4198 *
 4199 * This function sends a Set Supported Devices to the adapter.
4200 *
4201 * Return value:
4202 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4203 **/
4204static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4205{
4206 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4207 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4208 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4209 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4210 struct ipr_resource_entry *res = ipr_cmd->u.res;
4211
4212 ipr_cmd->job_step = ipr_ioa_reset_done;
4213
4214 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
d0ad6f50 4215 if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4216 continue;
4217
4218 ipr_cmd->u.res = res;
4219 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4220
4221 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4222 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4223 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4224
4225 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4226 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4227 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4228
4229 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4230 sizeof(struct ipr_supported_device));
4231 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4232 offsetof(struct ipr_misc_cbs, supp_dev));
4233 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4234 ioarcb->write_data_transfer_length =
4235 cpu_to_be32(sizeof(struct ipr_supported_device));
4236
4237 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4238 IPR_SET_SUP_DEVICE_TIMEOUT);
4239
4240 ipr_cmd->job_step = ipr_set_supported_devs;
4241 return IPR_RC_JOB_RETURN;
4242 }
4243
4244 return IPR_RC_JOB_CONTINUE;
4245}
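
/*
 * Editor's sketch, generic names: the resumable list walk used above.
 * One item is handled per invocation, the cursor is saved in the job,
 * and the completion of the asynchronous op re-enters this step.
 */
struct example_item { struct list_head node; int needs_work; };
struct example_job  { struct list_head items; struct example_item *cursor; };

static int example_process_next(struct example_job *job)
{
	struct example_item *pos = job->cursor;

	list_for_each_entry_continue(pos, &job->items, node) {
		if (!pos->needs_work)
			continue;

		job->cursor = pos;	/* resume here on the next call */
		/* issue the asynchronous op for 'pos' here */
		return 1;		/* like IPR_RC_JOB_RETURN */
	}

	return 0;			/* like IPR_RC_JOB_CONTINUE */
}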
4246
4247/**
4248 * ipr_setup_write_cache - Disable write cache if needed
4249 * @ipr_cmd: ipr command struct
4250 *
 4251 * This function sets up the adapter's write cache to the desired setting.
4252 *
4253 * Return value:
4254 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4255 **/
4256static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4257{
4258 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4259
4260 ipr_cmd->job_step = ipr_set_supported_devs;
4261 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4262 struct ipr_resource_entry, queue);
4263
4264 if (ioa_cfg->cache_state != CACHE_DISABLED)
4265 return IPR_RC_JOB_CONTINUE;
4266
4267 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4268 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4269 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4270 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4271
4272 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4273
4274 return IPR_RC_JOB_RETURN;
4275}
4276
4277/**
4278 * ipr_get_mode_page - Locate specified mode page
4279 * @mode_pages: mode page buffer
4280 * @page_code: page code to find
4281 * @len: minimum required length for mode page
4282 *
4283 * Return value:
4284 * pointer to mode page / NULL on failure
4285 **/
4286static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4287 u32 page_code, u32 len)
4288{
4289 struct ipr_mode_page_hdr *mode_hdr;
4290 u32 page_length;
4291 u32 length;
4292
4293 if (!mode_pages || (mode_pages->hdr.length == 0))
4294 return NULL;
4295
4296 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4297 mode_hdr = (struct ipr_mode_page_hdr *)
4298 (mode_pages->data + mode_pages->hdr.block_desc_len);
4299
4300 while (length) {
4301 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4302 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4303 return mode_hdr;
4304 break;
4305 } else {
4306 page_length = (sizeof(struct ipr_mode_page_hdr) +
4307 mode_hdr->page_length);
4308 length -= page_length;
4309 mode_hdr = (struct ipr_mode_page_hdr *)
4310 ((unsigned long)mode_hdr + page_length);
4311 }
4312 }
4313 return NULL;
4314}
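
/*
 * Editor's sketch, hypothetical caller: ipr_get_mode_page() returns NULL
 * when the page is missing or shorter than requested, so a caller should
 * check before dereferencing the result.
 */
static int example_check_page28(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_mode_pages *mode_pages)
{
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));
	if (!mode_page) {
		dev_err(&ioa_cfg->pdev->dev, "Mode page 28 not returned\n");
		return -EINVAL;
	}

	return mode_page->num_entries;
}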
4315
4316/**
4317 * ipr_check_term_power - Check for term power errors
4318 * @ioa_cfg: ioa config struct
4319 * @mode_pages: IOAFP mode pages buffer
4320 *
4321 * Check the IOAFP's mode page 28 for term power errors
4322 *
4323 * Return value:
4324 * nothing
4325 **/
4326static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4327 struct ipr_mode_pages *mode_pages)
4328{
4329 int i;
4330 int entry_length;
4331 struct ipr_dev_bus_entry *bus;
4332 struct ipr_mode_page28 *mode_page;
4333
4334 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4335 sizeof(struct ipr_mode_page28));
4336
4337 entry_length = mode_page->entry_length;
4338
4339 bus = mode_page->bus;
4340
4341 for (i = 0; i < mode_page->num_entries; i++) {
4342 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4343 dev_err(&ioa_cfg->pdev->dev,
4344 "Term power is absent on scsi bus %d\n",
4345 bus->res_addr.bus);
4346 }
4347
4348 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4349 }
4350}
4351
4352/**
4353 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4354 * @ioa_cfg: ioa config struct
4355 *
4356 * Looks through the config table checking for SES devices. If
4357 * the SES device is in the SES table indicating a maximum SCSI
4358 * bus speed, the speed is limited for the bus.
4359 *
4360 * Return value:
4361 * none
4362 **/
4363static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4364{
4365 u32 max_xfer_rate;
4366 int i;
4367
4368 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4369 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4370 ioa_cfg->bus_attr[i].bus_width);
4371
4372 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4373 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4374 }
4375}
4376
4377/**
4378 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4379 * @ioa_cfg: ioa config struct
4380 * @mode_pages: mode page 28 buffer
4381 *
4382 * Updates mode page 28 based on driver configuration
4383 *
4384 * Return value:
4385 * none
4386 **/
4387static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4388 struct ipr_mode_pages *mode_pages)
4389{
4390 int i, entry_length;
4391 struct ipr_dev_bus_entry *bus;
4392 struct ipr_bus_attributes *bus_attr;
4393 struct ipr_mode_page28 *mode_page;
4394
4395 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4396 sizeof(struct ipr_mode_page28));
4397
4398 entry_length = mode_page->entry_length;
4399
4400 /* Loop for each device bus entry */
4401 for (i = 0, bus = mode_page->bus;
4402 i < mode_page->num_entries;
4403 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4404 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4405 dev_err(&ioa_cfg->pdev->dev,
4406 "Invalid resource address reported: 0x%08X\n",
4407 IPR_GET_PHYS_LOC(bus->res_addr));
4408 continue;
4409 }
4410
4411 bus_attr = &ioa_cfg->bus_attr[i];
4412 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4413 bus->bus_width = bus_attr->bus_width;
4414 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4415 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4416 if (bus_attr->qas_enabled)
4417 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4418 else
4419 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4420 }
4421}
4422
4423/**
4424 * ipr_build_mode_select - Build a mode select command
4425 * @ipr_cmd: ipr command struct
4426 * @res_handle: resource handle to send command to
 4427 * @parm: Byte 1 of Mode Select command
4428 * @dma_addr: DMA buffer address
4429 * @xfer_len: data transfer length
4430 *
4431 * Return value:
4432 * none
4433 **/
4434static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4435 __be32 res_handle, u8 parm, u32 dma_addr,
4436 u8 xfer_len)
4437{
4438 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4439 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4440
4441 ioarcb->res_handle = res_handle;
4442 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4443 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4444 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4445 ioarcb->cmd_pkt.cdb[1] = parm;
4446 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4447
4448 ioadl->flags_and_data_len =
4449 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4450 ioadl->address = cpu_to_be32(dma_addr);
4451 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4452 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4453}
4454
4455/**
4456 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4457 * @ipr_cmd: ipr command struct
4458 *
4459 * This function sets up the SCSI bus attributes and sends
4460 * a Mode Select for Page 28 to activate them.
4461 *
4462 * Return value:
4463 * IPR_RC_JOB_RETURN
4464 **/
4465static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4466{
4467 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4468 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4469 int length;
4470
4471 ENTER;
4472 if (ioa_cfg->saved_mode_pages) {
4473 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4474 ioa_cfg->saved_mode_page_len);
4475 length = ioa_cfg->saved_mode_page_len;
4476 } else {
4477 ipr_scsi_bus_speed_limit(ioa_cfg);
4478 ipr_check_term_power(ioa_cfg, mode_pages);
4479 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4480 length = mode_pages->hdr.length + 1;
4481 mode_pages->hdr.length = 0;
4482 }
4483
4484 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4485 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4486 length);
4487
62275040 4488 ipr_cmd->job_step = ipr_setup_write_cache;
4489 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4490
4491 LEAVE;
4492 return IPR_RC_JOB_RETURN;
4493}
4494
4495/**
4496 * ipr_build_mode_sense - Builds a mode sense command
4497 * @ipr_cmd: ipr command struct
 4498 * @res_handle: resource handle to send command to
4499 * @parm: Byte 2 of mode sense command
4500 * @dma_addr: DMA address of mode sense buffer
4501 * @xfer_len: Size of DMA buffer
4502 *
4503 * Return value:
4504 * none
4505 **/
4506static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4507 __be32 res_handle,
4508 u8 parm, u32 dma_addr, u8 xfer_len)
4509{
4510 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4511 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4512
4513 ioarcb->res_handle = res_handle;
4514 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4515 ioarcb->cmd_pkt.cdb[2] = parm;
4516 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4517 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4518
4519 ioadl->flags_and_data_len =
4520 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4521 ioadl->address = cpu_to_be32(dma_addr);
4522 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4523 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4524}
4525
4526/**
4527 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4528 * @ipr_cmd: ipr command struct
4529 *
 4530 * This function sends a Page 28 mode sense to the IOA to
4531 * retrieve SCSI bus attributes.
4532 *
4533 * Return value:
4534 * IPR_RC_JOB_RETURN
4535 **/
4536static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4537{
4538 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4539
4540 ENTER;
4541 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4542 0x28, ioa_cfg->vpd_cbs_dma +
4543 offsetof(struct ipr_misc_cbs, mode_pages),
4544 sizeof(struct ipr_mode_pages));
4545
4546 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4547
4548 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4549
4550 LEAVE;
4551 return IPR_RC_JOB_RETURN;
4552}
4553
4554/**
4555 * ipr_init_res_table - Initialize the resource table
4556 * @ipr_cmd: ipr command struct
4557 *
4558 * This function looks through the existing resource table, comparing
4559 * it with the config table. This function will take care of old/new
4560 * devices and schedule adding/removing them from the mid-layer
4561 * as appropriate.
4562 *
4563 * Return value:
4564 * IPR_RC_JOB_CONTINUE
4565 **/
4566static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4567{
4568 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4569 struct ipr_resource_entry *res, *temp;
4570 struct ipr_config_table_entry *cfgte;
4571 int found, i;
4572 LIST_HEAD(old_res);
4573
4574 ENTER;
4575 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4576 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4577
4578 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4579 list_move_tail(&res->queue, &old_res);
4580
4581 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4582 cfgte = &ioa_cfg->cfg_table->dev[i];
4583 found = 0;
4584
4585 list_for_each_entry_safe(res, temp, &old_res, queue) {
4586 if (!memcmp(&res->cfgte.res_addr,
4587 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4588 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4589 found = 1;
4590 break;
4591 }
4592 }
4593
4594 if (!found) {
4595 if (list_empty(&ioa_cfg->free_res_q)) {
4596 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4597 break;
4598 }
4599
4600 found = 1;
4601 res = list_entry(ioa_cfg->free_res_q.next,
4602 struct ipr_resource_entry, queue);
4603 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4604 ipr_init_res_entry(res);
4605 res->add_to_ml = 1;
4606 }
4607
4608 if (found)
4609 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4610 }
4611
4612 list_for_each_entry_safe(res, temp, &old_res, queue) {
4613 if (res->sdev) {
4614 res->del_from_ml = 1;
4615 res->sdev->hostdata = NULL;
4616 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4617 } else {
4618 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4619 }
4620 }
4621
4622 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4623
4624 LEAVE;
4625 return IPR_RC_JOB_CONTINUE;
4626}
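
/*
 * Editor's sketch, generic names: the reconcile pattern implemented
 * above. The old set is parked on a scratch list, entries still present
 * in the new table are re-adopted, genuinely new entries come from the
 * free list, and whatever is left over is released.
 */
struct example_rsrc { struct list_head node; int id; };

static void example_reconcile(struct list_head *used, struct list_head *unused,
			      const int *new_ids, int count)
{
	struct example_rsrc *r, *t;
	LIST_HEAD(old);
	int i, found;

	list_splice_init(used, &old);

	for (i = 0; i < count; i++) {
		found = 0;
		list_for_each_entry_safe(r, t, &old, node) {
			if (r->id == new_ids[i]) {
				list_move_tail(&r->node, used);
				found = 1;
				break;
			}
		}

		if (!found && !list_empty(unused)) {
			r = list_entry(unused->next, struct example_rsrc, node);
			r->id = new_ids[i];
			list_move_tail(&r->node, used);
		}
	}

	/* anything still on 'old' no longer exists on the adapter */
	list_for_each_entry_safe(r, t, &old, node)
		list_move_tail(&r->node, unused);
}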
4627
4628/**
4629 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4630 * @ipr_cmd: ipr command struct
4631 *
4632 * This function sends a Query IOA Configuration command
4633 * to the adapter to retrieve the IOA configuration table.
4634 *
4635 * Return value:
4636 * IPR_RC_JOB_RETURN
4637 **/
4638static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4639{
4640 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4641 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4642 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4643 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4644
4645 ENTER;
4646 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4647 ucode_vpd->major_release, ucode_vpd->card_type,
4648 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4649 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4650 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4651
4652 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4653 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4654 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4655
4656 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4657 ioarcb->read_data_transfer_length =
4658 cpu_to_be32(sizeof(struct ipr_config_table));
4659
4660 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4661 ioadl->flags_and_data_len =
4662 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4663
4664 ipr_cmd->job_step = ipr_init_res_table;
4665
4666 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4667
4668 LEAVE;
4669 return IPR_RC_JOB_RETURN;
4670}
4671
4672/**
4673 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 4674 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1)
 * @page: page code (CDB byte 2)
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
4675 *
4676 * This utility function sends an inquiry to the adapter.
4677 *
4678 * Return value:
4679 * none
4680 **/
4681static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4682 u32 dma_addr, u8 xfer_len)
4683{
4684 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4685 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4686
4687 ENTER;
4688 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4689 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4690
4691 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4692 ioarcb->cmd_pkt.cdb[1] = flags;
4693 ioarcb->cmd_pkt.cdb[2] = page;
4694 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4695
4696 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4697 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4698
4699 ioadl->address = cpu_to_be32(dma_addr);
4700 ioadl->flags_and_data_len =
4701 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4702
4703 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4704 LEAVE;
4705}
4706
4707/**
4708 * ipr_inquiry_page_supported - Is the given inquiry page supported
4709 * @page0: inquiry page 0 buffer
4710 * @page: page code.
4711 *
4712 * This function determines if the specified inquiry page is supported.
4713 *
4714 * Return value:
4715 * 1 if page is supported / 0 if not
4716 **/
4717static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
4718{
4719 int i;
4720
4721 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
4722 if (page0->page[i] == page)
4723 return 1;
4724
4725 return 0;
4726}
4727
4728/**
4729 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4730 * @ipr_cmd: ipr command struct
4731 *
4732 * This function sends a Page 3 inquiry to the adapter
4733 * to retrieve software VPD information.
4734 *
4735 * Return value:
4736 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4737 **/
4738static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4739{
4740 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4741 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
4742
4743 ENTER;
4744
4745 if (!ipr_inquiry_page_supported(page0, 1))
4746 ioa_cfg->cache_state = CACHE_NONE;
4747
4748 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4749
4750 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4751 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4752 sizeof(struct ipr_inquiry_page3));
4753
4754 LEAVE;
4755 return IPR_RC_JOB_RETURN;
4756}
4757
4758/**
4759 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
4760 * @ipr_cmd: ipr command struct
4761 *
4762 * This function sends a Page 0 inquiry to the adapter
4763 * to retrieve supported inquiry pages.
4764 *
4765 * Return value:
4766 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4767 **/
4768static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
4769{
4770 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4771 char type[5];
4772
4773 ENTER;
4774
4775 /* Grab the type out of the VPD and store it away */
4776 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4777 type[4] = '\0';
4778 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4779
62275040 4780 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 4781
4782 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
4783 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
4784 sizeof(struct ipr_inquiry_page0));
4785
4786 LEAVE;
4787 return IPR_RC_JOB_RETURN;
4788}
4789
4790/**
4791 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4792 * @ipr_cmd: ipr command struct
4793 *
4794 * This function sends a standard inquiry to the adapter.
4795 *
4796 * Return value:
4797 * IPR_RC_JOB_RETURN
4798 **/
4799static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4800{
4801 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4802
4803 ENTER;
62275040 4804 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
4805
4806 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4807 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4808 sizeof(struct ipr_ioa_vpd));
4809
4810 LEAVE;
4811 return IPR_RC_JOB_RETURN;
4812}
4813
4814/**
4815 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4816 * @ipr_cmd: ipr command struct
4817 *
 4818 * This function sends an Identify Host Request Response Queue
4819 * command to establish the HRRQ with the adapter.
4820 *
4821 * Return value:
4822 * IPR_RC_JOB_RETURN
4823 **/
4824static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4825{
4826 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4827 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4828
4829 ENTER;
4830 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4831
4832 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4833 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4834
4835 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4836 ioarcb->cmd_pkt.cdb[2] =
4837 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4838 ioarcb->cmd_pkt.cdb[3] =
4839 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4840 ioarcb->cmd_pkt.cdb[4] =
4841 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4842 ioarcb->cmd_pkt.cdb[5] =
4843 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4844 ioarcb->cmd_pkt.cdb[7] =
4845 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4846 ioarcb->cmd_pkt.cdb[8] =
4847 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4848
4849 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4850
4851 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4852
4853 LEAVE;
4854 return IPR_RC_JOB_RETURN;
4855}
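
/*
 * Editor's sketch, hypothetical helper: the CDB packing used above. The
 * 32-bit host RRQ bus address goes into CDB bytes 2..5 MSB first, and
 * the queue size in bytes into CDB bytes 7..8.
 */
static void example_pack_hrrq_cdb(u8 *cdb, u32 hrrq_dma, u32 hrrq_len)
{
	cdb[0] = IPR_ID_HOST_RR_Q;
	cdb[2] = (hrrq_dma >> 24) & 0xff;
	cdb[3] = (hrrq_dma >> 16) & 0xff;
	cdb[4] = (hrrq_dma >> 8) & 0xff;
	cdb[5] = hrrq_dma & 0xff;
	cdb[7] = (hrrq_len >> 8) & 0xff;
	cdb[8] = hrrq_len & 0xff;
}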
4856
4857/**
4858 * ipr_reset_timer_done - Adapter reset timer function
4859 * @ipr_cmd: ipr command struct
4860 *
4861 * Description: This function is used in adapter reset processing
4862 * for timing events. If the reset_cmd pointer in the IOA
4863 * config struct is not this adapter's we are doing nested
4864 * resets and fail_all_ops will take care of freeing the
4865 * command block.
4866 *
4867 * Return value:
4868 * none
4869 **/
4870static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4871{
4872 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4873 unsigned long lock_flags = 0;
4874
4875 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4876
4877 if (ioa_cfg->reset_cmd == ipr_cmd) {
4878 list_del(&ipr_cmd->queue);
4879 ipr_cmd->done(ipr_cmd);
4880 }
4881
4882 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4883}
4884
4885/**
4886 * ipr_reset_start_timer - Start a timer for adapter reset job
4887 * @ipr_cmd: ipr command struct
4888 * @timeout: timeout value
4889 *
4890 * Description: This function is used in adapter reset processing
4891 * for timing events. If the reset_cmd pointer in the IOA
4892 * config struct is not this adapter's we are doing nested
4893 * resets and fail_all_ops will take care of freeing the
4894 * command block.
4895 *
4896 * Return value:
4897 * none
4898 **/
4899static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4900 unsigned long timeout)
4901{
4902 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4903 ipr_cmd->done = ipr_reset_ioa_job;
4904
4905 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4906 ipr_cmd->timer.expires = jiffies + timeout;
4907 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4908 add_timer(&ipr_cmd->timer);
4909}
4910
4911/**
4912 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4913 * @ioa_cfg: ioa cfg struct
4914 *
4915 * Return value:
4916 * nothing
4917 **/
4918static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4919{
4920 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4921
4922 /* Initialize Host RRQ pointers */
4923 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4924 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4925 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4926 ioa_cfg->toggle_bit = 1;
4927
4928 /* Zero out config table */
4929 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4930}
4931
4932/**
4933 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4934 * @ipr_cmd: ipr command struct
4935 *
4936 * This function reinitializes some control blocks and
4937 * enables destructive diagnostics on the adapter.
4938 *
4939 * Return value:
4940 * IPR_RC_JOB_RETURN
4941 **/
4942static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4943{
4944 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4945 volatile u32 int_reg;
4946
4947 ENTER;
4948 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
4949 ipr_init_ioa_mem(ioa_cfg);
4950
4951 ioa_cfg->allow_interrupts = 1;
4952 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4953
4954 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4955 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4956 ioa_cfg->regs.clr_interrupt_mask_reg);
4957 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4958 return IPR_RC_JOB_CONTINUE;
4959 }
4960
4961 /* Enable destructive diagnostics on IOA */
4962 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4963
4964 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4965 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4966
4967 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
4968
4969 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4970 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
4971 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
4972 ipr_cmd->done = ipr_reset_ioa_job;
4973 add_timer(&ipr_cmd->timer);
4974 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4975
4976 LEAVE;
4977 return IPR_RC_JOB_RETURN;
4978}
4979
4980/**
4981 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4982 * @ipr_cmd: ipr command struct
4983 *
4984 * This function is invoked when an adapter dump has run out
4985 * of processing time.
4986 *
4987 * Return value:
4988 * IPR_RC_JOB_CONTINUE
4989 **/
4990static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4991{
4992 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4993
4994 if (ioa_cfg->sdt_state == GET_DUMP)
4995 ioa_cfg->sdt_state = ABORT_DUMP;
4996
4997 ipr_cmd->job_step = ipr_reset_alert;
4998
4999 return IPR_RC_JOB_CONTINUE;
5000}
5001
5002/**
5003 * ipr_unit_check_no_data - Log a unit check/no data error log
5004 * @ioa_cfg: ioa config struct
5005 *
5006 * Logs an error indicating the adapter unit checked, but for some
5007 * reason, we were unable to fetch the unit check buffer.
5008 *
5009 * Return value:
5010 * nothing
5011 **/
5012static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5013{
5014 ioa_cfg->errors_logged++;
5015 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5016}
5017
5018/**
5019 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5020 * @ioa_cfg: ioa config struct
5021 *
5022 * Fetches the unit check buffer from the adapter by clocking the data
5023 * through the mailbox register.
5024 *
5025 * Return value:
5026 * nothing
5027 **/
5028static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5029{
5030 unsigned long mailbox;
5031 struct ipr_hostrcb *hostrcb;
5032 struct ipr_uc_sdt sdt;
5033 int rc, length;
5034
5035 mailbox = readl(ioa_cfg->ioa_mailbox);
5036
5037 if (!ipr_sdt_is_fmt2(mailbox)) {
5038 ipr_unit_check_no_data(ioa_cfg);
5039 return;
5040 }
5041
5042 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5043 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5044 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5045
5046 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5047 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5048 ipr_unit_check_no_data(ioa_cfg);
5049 return;
5050 }
5051
5052 /* Find length of the first sdt entry (UC buffer) */
5053 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5054 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5055
5056 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5057 struct ipr_hostrcb, queue);
5058 list_del(&hostrcb->queue);
5059 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5060
5061 rc = ipr_get_ldump_data_section(ioa_cfg,
5062 be32_to_cpu(sdt.entry[0].bar_str_offset),
5063 (__be32 *)&hostrcb->hcam,
5064 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5065
5066 if (!rc)
5067 ipr_handle_log_data(ioa_cfg, hostrcb);
5068 else
5069 ipr_unit_check_no_data(ioa_cfg);
5070
5071 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5072}
5073
5074/**
5075 * ipr_reset_restore_cfg_space - Restore PCI config space.
5076 * @ipr_cmd: ipr command struct
5077 *
5078 * Description: This function restores the saved PCI config space of
5079 * the adapter, fails all outstanding ops back to the callers, and
5080 * fetches the dump/unit check if applicable to this reset.
5081 *
5082 * Return value:
5083 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5084 **/
5085static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5086{
5087 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5088 int rc;
5089
5090 ENTER;
b30197d2 5091 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5092 rc = pci_restore_state(ioa_cfg->pdev);
5093
5094 if (rc != PCIBIOS_SUCCESSFUL) {
5095 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5096 return IPR_RC_JOB_CONTINUE;
5097 }
5098
5099 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5100 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5101 return IPR_RC_JOB_CONTINUE;
5102 }
5103
5104 ipr_fail_all_ops(ioa_cfg);
5105
5106 if (ioa_cfg->ioa_unit_checked) {
5107 ioa_cfg->ioa_unit_checked = 0;
5108 ipr_get_unit_check_buffer(ioa_cfg);
5109 ipr_cmd->job_step = ipr_reset_alert;
5110 ipr_reset_start_timer(ipr_cmd, 0);
5111 return IPR_RC_JOB_RETURN;
5112 }
5113
5114 if (ioa_cfg->in_ioa_bringdown) {
5115 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5116 } else {
5117 ipr_cmd->job_step = ipr_reset_enable_ioa;
5118
5119 if (GET_DUMP == ioa_cfg->sdt_state) {
5120 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5121 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5122 schedule_work(&ioa_cfg->work_q);
5123 return IPR_RC_JOB_RETURN;
5124 }
5125 }
5126
 5127 LEAVE;
5128 return IPR_RC_JOB_CONTINUE;
5129}
5130
5131/**
5132 * ipr_reset_start_bist - Run BIST on the adapter.
5133 * @ipr_cmd: ipr command struct
5134 *
5135 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5136 *
5137 * Return value:
5138 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5139 **/
5140static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5141{
5142 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5143 int rc;
5144
5145 ENTER;
b30197d2 5146 pci_block_user_cfg_access(ioa_cfg->pdev);
5147 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5148
5149 if (rc != PCIBIOS_SUCCESSFUL) {
5150 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5151 rc = IPR_RC_JOB_CONTINUE;
5152 } else {
5153 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5154 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5155 rc = IPR_RC_JOB_RETURN;
5156 }
5157
5158 LEAVE;
5159 return rc;
5160}
5161
5162/**
5163 * ipr_reset_allowed - Query whether or not IOA can be reset
5164 * @ioa_cfg: ioa config struct
5165 *
5166 * Return value:
5167 * 0 if reset not allowed / non-zero if reset is allowed
5168 **/
5169static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5170{
5171 volatile u32 temp_reg;
5172
5173 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5174 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5175}
5176
5177/**
5178 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5179 * @ipr_cmd: ipr command struct
5180 *
5181 * Description: This function waits for adapter permission to run BIST,
5182 * then runs BIST. If the adapter does not give permission after a
5183 * reasonable time, we will reset the adapter anyway. The impact of
5184 * resetting the adapter without warning the adapter is the risk of
5185 * losing the persistent error log on the adapter. If the adapter is
5186 * reset while it is writing to the flash on the adapter, the flash
5187 * segment will have bad ECC and be zeroed.
5188 *
5189 * Return value:
5190 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5191 **/
5192static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5193{
5194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5195 int rc = IPR_RC_JOB_RETURN;
5196
5197 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5198 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5199 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5200 } else {
5201 ipr_cmd->job_step = ipr_reset_start_bist;
5202 rc = IPR_RC_JOB_CONTINUE;
5203 }
5204
5205 return rc;
5206}
5207
5208/**
5209 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5210 * @ipr_cmd: ipr command struct
5211 *
5212 * Description: This function alerts the adapter that it will be reset.
5213 * If memory space is not currently enabled, proceed directly
5214 * to running BIST on the adapter. The timer must always be started
5215 * so we guarantee we do not run BIST from ipr_isr.
5216 *
5217 * Return value:
5218 * IPR_RC_JOB_RETURN
5219 **/
5220static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5221{
5222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5223 u16 cmd_reg;
5224 int rc;
5225
5226 ENTER;
5227 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5228
5229 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5230 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5231 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5232 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5233 } else {
5234 ipr_cmd->job_step = ipr_reset_start_bist;
5235 }
5236
5237 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5238 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5239
5240 LEAVE;
5241 return IPR_RC_JOB_RETURN;
5242}
5243
5244/**
5245 * ipr_reset_ucode_download_done - Microcode download completion
5246 * @ipr_cmd: ipr command struct
5247 *
5248 * Description: This function unmaps the microcode download buffer.
5249 *
5250 * Return value:
5251 * IPR_RC_JOB_CONTINUE
5252 **/
5253static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5254{
5255 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5256 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5257
5258 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5259 sglist->num_sg, DMA_TO_DEVICE);
5260
5261 ipr_cmd->job_step = ipr_reset_alert;
5262 return IPR_RC_JOB_CONTINUE;
5263}
5264
5265/**
5266 * ipr_reset_ucode_download - Download microcode to the adapter
5267 * @ipr_cmd: ipr command struct
5268 *
 5269 * Description: This function checks to see if there is microcode
5270 * to download to the adapter. If there is, a download is performed.
5271 *
5272 * Return value:
5273 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5274 **/
5275static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5276{
5277 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5278 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5279
5280 ENTER;
5281 ipr_cmd->job_step = ipr_reset_alert;
5282
5283 if (!sglist)
5284 return IPR_RC_JOB_CONTINUE;
5285
5286 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5287 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5288 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5289 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5290 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5291 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5292 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5293
5294 if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5295 dev_err(&ioa_cfg->pdev->dev,
5296 "Failed to map microcode download buffer\n");
5297 return IPR_RC_JOB_CONTINUE;
5298 }
5299
5300 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5301
5302 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5303 IPR_WRITE_BUFFER_TIMEOUT);
5304
5305 LEAVE;
5306 return IPR_RC_JOB_RETURN;
5307}
5308
5309/**
5310 * ipr_reset_shutdown_ioa - Shutdown the adapter
5311 * @ipr_cmd: ipr command struct
5312 *
5313 * Description: This function issues an adapter shutdown of the
5314 * specified type to the specified adapter as part of the
5315 * adapter reset job.
5316 *
5317 * Return value:
5318 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5319 **/
5320static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5321{
5322 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5323 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5324 unsigned long timeout;
5325 int rc = IPR_RC_JOB_CONTINUE;
5326
5327 ENTER;
5328 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5329 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5330 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5331 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5332 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5333
5334 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5335 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5336 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5337 timeout = IPR_INTERNAL_TIMEOUT;
5338 else
5339 timeout = IPR_SHUTDOWN_TIMEOUT;
5340
5341 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5342
5343 rc = IPR_RC_JOB_RETURN;
5344 ipr_cmd->job_step = ipr_reset_ucode_download;
5345 } else
5346 ipr_cmd->job_step = ipr_reset_alert;
5347
5348 LEAVE;
5349 return rc;
5350}
5351
5352/**
5353 * ipr_reset_ioa_job - Adapter reset job
5354 * @ipr_cmd: ipr command struct
5355 *
5356 * Description: This function is the job router for the adapter reset job.
5357 *
5358 * Return value:
5359 * none
5360 **/
5361static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5362{
5363 u32 rc, ioasc;
5364 unsigned long scratch = ipr_cmd->u.scratch;
5365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5366
5367 do {
5368 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5369
5370 if (ioa_cfg->reset_cmd != ipr_cmd) {
5371 /*
5372 * We are doing nested adapter resets and this is
5373 * not the current reset job.
5374 */
5375 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5376 return;
5377 }
5378
5379 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5380 dev_err(&ioa_cfg->pdev->dev,
5381 "0x%02X failed with IOASC: 0x%08X\n",
5382 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5383
5384 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5385 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5386 return;
5387 }
5388
5389 ipr_reinit_ipr_cmnd(ipr_cmd);
5390 ipr_cmd->u.scratch = scratch;
5391 rc = ipr_cmd->job_step(ipr_cmd);
5392 } while(rc == IPR_RC_JOB_CONTINUE);
5393}
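
/*
 * Editor's sketch, generic names: the job-step state machine driven by
 * ipr_reset_ioa_job above. Steps that complete synchronously return
 * CONTINUE and the loop runs the next step at once; steps that start an
 * asynchronous op return RETURN and their completion re-enters the loop.
 */
enum example_rc { EXAMPLE_CONTINUE, EXAMPLE_RETURN };

struct example_ctx {
	enum example_rc (*step)(struct example_ctx *ctx);
};

static void example_run_job(struct example_ctx *ctx)
{
	enum example_rc rc;

	do {
		/* each step points ctx->step at whatever should run next */
		rc = ctx->step(ctx);
	} while (rc == EXAMPLE_CONTINUE);

	/*
	 * EXAMPLE_RETURN: an async op is outstanding; its completion
	 * callback calls example_run_job() again.
	 */
}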
5394
5395/**
5396 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5397 * @ioa_cfg: ioa config struct
5398 * @job_step: first job step of reset job
5399 * @shutdown_type: shutdown type
5400 *
5401 * Description: This function will initiate the reset of the given adapter
5402 * starting at the selected job step.
5403 * If the caller needs to wait on the completion of the reset,
5404 * the caller must sleep on the reset_wait_q.
5405 *
5406 * Return value:
5407 * none
5408 **/
5409static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5410 int (*job_step) (struct ipr_cmnd *),
5411 enum ipr_shutdown_type shutdown_type)
5412{
5413 struct ipr_cmnd *ipr_cmd;
5414
5415 ioa_cfg->in_reset_reload = 1;
5416 ioa_cfg->allow_cmds = 0;
5417 scsi_block_requests(ioa_cfg->host);
5418
5419 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5420 ioa_cfg->reset_cmd = ipr_cmd;
5421 ipr_cmd->job_step = job_step;
5422 ipr_cmd->u.shutdown_type = shutdown_type;
5423
5424 ipr_reset_ioa_job(ipr_cmd);
5425}
5426
5427/**
5428 * ipr_initiate_ioa_reset - Initiate an adapter reset
5429 * @ioa_cfg: ioa config struct
5430 * @shutdown_type: shutdown type
5431 *
5432 * Description: This function will initiate the reset of the given adapter.
5433 * If the caller needs to wait on the completion of the reset,
5434 * the caller must sleep on the reset_wait_q.
5435 *
5436 * Return value:
5437 * none
5438 **/
5439static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5440 enum ipr_shutdown_type shutdown_type)
5441{
5442 if (ioa_cfg->ioa_is_dead)
5443 return;
5444
5445 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5446 ioa_cfg->sdt_state = ABORT_DUMP;
5447
5448 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5449 dev_err(&ioa_cfg->pdev->dev,
5450 "IOA taken offline - error recovery failed\n");
5451
5452 ioa_cfg->reset_retries = 0;
5453 ioa_cfg->ioa_is_dead = 1;
5454
5455 if (ioa_cfg->in_ioa_bringdown) {
5456 ioa_cfg->reset_cmd = NULL;
5457 ioa_cfg->in_reset_reload = 0;
5458 ipr_fail_all_ops(ioa_cfg);
5459 wake_up_all(&ioa_cfg->reset_wait_q);
5460
5461 spin_unlock_irq(ioa_cfg->host->host_lock);
5462 scsi_unblock_requests(ioa_cfg->host);
5463 spin_lock_irq(ioa_cfg->host->host_lock);
5464 return;
5465 } else {
5466 ioa_cfg->in_ioa_bringdown = 1;
5467 shutdown_type = IPR_SHUTDOWN_NONE;
5468 }
5469 }
5470
5471 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5472 shutdown_type);
5473}
5474
5475/**
5476 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5477 * @ioa_cfg: ioa cfg struct
5478 *
 5479 * Description: This is the second phase of adapter initialization.
 5480 * This function takes care of initializing the adapter to the point
 5481 * where it can accept new commands.
 5482 *
 5483 * Return value:
 5484 * 0 on success / -EIO on failure
5485 **/
5486static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5487{
5488 int rc = 0;
5489 unsigned long host_lock_flags = 0;
5490
5491 ENTER;
5492 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5493 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5494 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5495
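	/*
	 * Drop the host lock before sleeping: wait_event() may block until
	 * the reset job clears in_reset_reload, and we cannot sleep while
	 * holding a spinlock.
	 */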
5496 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5497 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5498 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5499
5500 if (ioa_cfg->ioa_is_dead) {
5501 rc = -EIO;
5502 } else if (ipr_invalid_adapter(ioa_cfg)) {
5503 if (!ipr_testmode)
5504 rc = -EIO;
5505
5506 dev_err(&ioa_cfg->pdev->dev,
5507 "Adapter not supported in this hardware configuration.\n");
5508 }
5509
5510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5511
5512 LEAVE;
5513 return rc;
5514}
5515
5516/**
5517 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5518 * @ioa_cfg: ioa config struct
5519 *
5520 * Return value:
5521 * none
5522 **/
5523static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5524{
5525 int i;
5526
5527 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5528 if (ioa_cfg->ipr_cmnd_list[i])
5529 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5530 ioa_cfg->ipr_cmnd_list[i],
5531 ioa_cfg->ipr_cmnd_list_dma[i]);
5532
5533 ioa_cfg->ipr_cmnd_list[i] = NULL;
5534 }
5535
5536 if (ioa_cfg->ipr_cmd_pool)
5537 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5538
5539 ioa_cfg->ipr_cmd_pool = NULL;
5540}
5541
5542/**
5543 * ipr_free_mem - Frees memory allocated for an adapter
5544 * @ioa_cfg: ioa cfg struct
5545 *
5546 * Return value:
5547 * none
5548 **/
5549static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5550{
5551 int i;
5552
5553 kfree(ioa_cfg->res_entries);
5554 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5555 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5556 ipr_free_cmd_blks(ioa_cfg);
5557 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5558 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5559 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5560 ioa_cfg->cfg_table,
5561 ioa_cfg->cfg_table_dma);
5562
5563 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5564 pci_free_consistent(ioa_cfg->pdev,
5565 sizeof(struct ipr_hostrcb),
5566 ioa_cfg->hostrcb[i],
5567 ioa_cfg->hostrcb_dma[i]);
5568 }
5569
5570 ipr_free_dump(ioa_cfg);
5571 kfree(ioa_cfg->saved_mode_pages);
5572 kfree(ioa_cfg->trace);
5573}
5574
5575/**
5576 * ipr_free_all_resources - Free all allocated resources for an adapter.
5577 * @ioa_cfg: ioa config struct
5578 *
5579 * This function frees all allocated resources for the
5580 * specified adapter.
5581 *
5582 * Return value:
5583 * none
5584 **/
5585static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5586{
5587 struct pci_dev *pdev = ioa_cfg->pdev;
5588
5589 ENTER;
5590 free_irq(pdev->irq, ioa_cfg);
5591 iounmap(ioa_cfg->hdw_dma_regs);
5592 pci_release_regions(pdev);
5593 ipr_free_mem(ioa_cfg);
5594 scsi_host_put(ioa_cfg->host);
5595 pci_disable_device(pdev);
5596 LEAVE;
5597}
5598
5599/**
5600 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5601 * @ioa_cfg: ioa config struct
5602 *
5603 * Return value:
5604 * 0 on success / -ENOMEM on allocation failure
5605 **/
5606static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5607{
5608 struct ipr_cmnd *ipr_cmd;
5609 struct ipr_ioarcb *ioarcb;
5610 dma_addr_t dma_addr;
5611 int i;
5612
5613 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5614 sizeof(struct ipr_cmnd), 8, 0);
5615
5616 if (!ioa_cfg->ipr_cmd_pool)
5617 return -ENOMEM;
5618
5619 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5620 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5621
5622 if (!ipr_cmd) {
5623 ipr_free_cmd_blks(ioa_cfg);
5624 return -ENOMEM;
5625 }
5626
5627 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5628 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5629 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5630
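	/*
	 * Pre-initialize the fields of the IOARCB that never change: the bus
	 * address of the IOARCB itself, the response handle the adapter will
	 * post to the HRRQ (the command index shifted left two bits, leaving
	 * the low bits for HRRQ flags such as the toggle bit), and the bus
	 * addresses of the IOADL and IOASA embedded in this command block.
	 */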
5631 ioarcb = &ipr_cmd->ioarcb;
5632 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5633 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5634 ioarcb->write_ioadl_addr =
5635 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5636 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5637 ioarcb->ioasa_host_pci_addr =
5638 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5639 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5640 ipr_cmd->cmd_index = i;
5641 ipr_cmd->ioa_cfg = ioa_cfg;
5642 ipr_cmd->sense_buffer_dma = dma_addr +
5643 offsetof(struct ipr_cmnd, sense_buffer);
5644
5645 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5646 }
5647
5648 return 0;
5649}
5650
5651/**
5652 * ipr_alloc_mem - Allocate memory for an adapter
5653 * @ioa_cfg: ioa config struct
5654 *
5655 * Return value:
5656 * 0 on success / non-zero for error
5657 **/
5658static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5659{
5660 struct pci_dev *pdev = ioa_cfg->pdev;
5661 int i, rc = -ENOMEM;
5662
5663 ENTER;
5664 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
5665 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5666
5667 if (!ioa_cfg->res_entries)
5668 goto out;
5669
5670 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5671 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5672
5673 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5674 sizeof(struct ipr_misc_cbs),
5675 &ioa_cfg->vpd_cbs_dma);
5676
5677 if (!ioa_cfg->vpd_cbs)
5678 goto out_free_res_entries;
5679
5680 if (ipr_alloc_cmd_blks(ioa_cfg))
5681 goto out_free_vpd_cbs;
5682
5683 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5684 sizeof(u32) * IPR_NUM_CMD_BLKS,
5685 &ioa_cfg->host_rrq_dma);
5686
5687 if (!ioa_cfg->host_rrq)
5688 goto out_ipr_free_cmd_blocks;
5689
5690 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5691 sizeof(struct ipr_config_table),
5692 &ioa_cfg->cfg_table_dma);
5693
5694 if (!ioa_cfg->cfg_table)
5695 goto out_free_host_rrq;
5696
5697 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5698 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5699 sizeof(struct ipr_hostrcb),
5700 &ioa_cfg->hostrcb_dma[i]);
5701
5702 if (!ioa_cfg->hostrcb[i])
5703 goto out_free_hostrcb_dma;
5704
5705 ioa_cfg->hostrcb[i]->hostrcb_dma =
5706 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5707 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5708 }
5709
5710 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
5711 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5712
5713 if (!ioa_cfg->trace)
5714 goto out_free_hostrcb_dma;
5715
5716 rc = 0;
5717out:
5718 LEAVE;
5719 return rc;
5720
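/*
 * Error unwinding: at this point i holds the number of HCAM buffers that
 * were successfully allocated, so only those are freed before tearing
 * down the earlier allocations in reverse order.
 */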
5721out_free_hostrcb_dma:
5722 while (i-- > 0) {
5723 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5724 ioa_cfg->hostrcb[i],
5725 ioa_cfg->hostrcb_dma[i]);
5726 }
5727 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5728 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5729out_free_host_rrq:
5730 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5731 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5732out_ipr_free_cmd_blocks:
5733 ipr_free_cmd_blks(ioa_cfg);
5734out_free_vpd_cbs:
5735 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5736 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5737out_free_res_entries:
5738 kfree(ioa_cfg->res_entries);
5739 goto out;
5740}
5741
5742/**
5743 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5744 * @ioa_cfg: ioa config struct
5745 *
5746 * Return value:
5747 * none
5748 **/
5749static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5750{
5751 int i;
5752
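	/*
	 * ipr_max_speed selects an entry in ipr_max_bus_speeds[]; values
	 * outside the table fall back to the U160 rate.
	 */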
5753 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5754 ioa_cfg->bus_attr[i].bus = i;
5755 ioa_cfg->bus_attr[i].qas_enabled = 0;
5756 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
5757 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5758 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5759 else
5760 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5761 }
5762}
5763
5764/**
5765 * ipr_init_ioa_cfg - Initialize IOA config struct
5766 * @ioa_cfg: ioa config struct
5767 * @host: scsi host struct
5768 * @pdev: PCI dev struct
5769 *
5770 * Return value:
5771 * none
5772 **/
5773static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5774 struct Scsi_Host *host, struct pci_dev *pdev)
5775{
5776 const struct ipr_interrupt_offsets *p;
5777 struct ipr_interrupts *t;
5778 void __iomem *base;
5779
5780 ioa_cfg->host = host;
5781 ioa_cfg->pdev = pdev;
5782 ioa_cfg->log_level = ipr_log_level;
5783 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5784 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5785 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5786 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5787 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5788 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5789 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5790 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5791
5792 INIT_LIST_HEAD(&ioa_cfg->free_q);
5793 INIT_LIST_HEAD(&ioa_cfg->pending_q);
5794 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5795 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5796 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5797 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5798 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5799 init_waitqueue_head(&ioa_cfg->reset_wait_q);
5800 ioa_cfg->sdt_state = INACTIVE;
5801 if (ipr_enable_cache)
5802 ioa_cfg->cache_state = CACHE_ENABLED;
5803 else
5804 ioa_cfg->cache_state = CACHE_DISABLED;
5805
5806 ipr_initialize_bus_attr(ioa_cfg);
5807
5808 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5809 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5810 host->max_channel = IPR_MAX_BUS_TO_SCAN;
5811 host->unique_id = host->host_no;
5812 host->max_cmd_len = IPR_MAX_CDB_LEN;
5813 pci_set_drvdata(pdev, ioa_cfg);
5814
5815 p = &ioa_cfg->chip_cfg->regs;
5816 t = &ioa_cfg->regs;
5817 base = ioa_cfg->hdw_dma_regs;
5818
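	/*
	 * Translate the chip-specific register offsets from ipr_chip_cfg
	 * into mapped MMIO addresses relative to the adapter's register base.
	 */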
5819 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5820 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5821 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5822 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5823 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5824 t->ioarrin_reg = base + p->ioarrin_reg;
5825 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5826 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5827 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5828}
5829
5830/**
5831 * ipr_get_chip_cfg - Find adapter chip configuration
5832 * @dev_id: PCI device id struct
5833 *
5834 * Return value:
5835 * ptr to chip config on success / NULL on failure
5836 **/
5837static const struct ipr_chip_cfg_t * __devinit
5838ipr_get_chip_cfg(const struct pci_device_id *dev_id)
5839{
5840 int i;
5841
5842 if (dev_id->driver_data)
5843 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5844
5845 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
5846 if (ipr_chip[i].vendor == dev_id->vendor &&
5847 ipr_chip[i].device == dev_id->device)
5848 return ipr_chip[i].cfg;
5849 return NULL;
5850}
5851
5852/**
5853 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5854 * @pdev: PCI device struct
5855 * @dev_id: PCI device id struct
5856 *
5857 * Return value:
5858 * 0 on success / non-zero on failure
5859 **/
5860static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5861 const struct pci_device_id *dev_id)
5862{
5863 struct ipr_ioa_cfg *ioa_cfg;
5864 struct Scsi_Host *host;
5865 unsigned long ipr_regs_pci;
5866 void __iomem *ipr_regs;
5867 u32 rc = PCIBIOS_SUCCESSFUL;
5868
5869 ENTER;
5870
5871 if ((rc = pci_enable_device(pdev))) {
5872 dev_err(&pdev->dev, "Cannot enable adapter\n");
5873 goto out;
5874 }
5875
5876 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
5877
5878 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5879
5880 if (!host) {
5881 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5882 rc = -ENOMEM;
5883 goto out_disable;
5884 }
5885
5886 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5887 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
5888
5889 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
5890
5891 if (!ioa_cfg->chip_cfg) {
5892 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
5893 dev_id->vendor, dev_id->device);
5894 goto out_scsi_host_put;
5895 }
5896
5897 ipr_regs_pci = pci_resource_start(pdev, 0);
5898
5899 rc = pci_request_regions(pdev, IPR_NAME);
5900 if (rc < 0) {
5901 dev_err(&pdev->dev,
5902 "Couldn't register memory range of registers\n");
5903 goto out_scsi_host_put;
5904 }
5905
5906 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
5907
5908 if (!ipr_regs) {
5909 dev_err(&pdev->dev,
5910 "Couldn't map memory range of registers\n");
5911 rc = -ENOMEM;
5912 goto out_release_regions;
5913 }
5914
5915 ioa_cfg->hdw_dma_regs = ipr_regs;
5916 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5917 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5918
5919 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5920
5921 pci_set_master(pdev);
5922
5923 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5924 if (rc < 0) {
5925 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5926 goto cleanup_nomem;
5927 }
5928
5929 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5930 ioa_cfg->chip_cfg->cache_line_size);
5931
5932 if (rc != PCIBIOS_SUCCESSFUL) {
5933 dev_err(&pdev->dev, "Write of cache line size failed\n");
5934 rc = -EIO;
5935 goto cleanup_nomem;
5936 }
5937
5938 /* Save away PCI config space for use following IOA reset */
5939 rc = pci_save_state(pdev);
5940
5941 if (rc != PCIBIOS_SUCCESSFUL) {
5942 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5943 rc = -EIO;
5944 goto cleanup_nomem;
5945 }
5946
5947 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5948 goto cleanup_nomem;
5949
5950 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5951 goto cleanup_nomem;
5952
5953 rc = ipr_alloc_mem(ioa_cfg);
5954 if (rc < 0) {
5955 dev_err(&pdev->dev,
5956 "Couldn't allocate enough memory for device driver!\n");
5957 goto cleanup_nomem;
5958 }
5959
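	/*
	 * Mask adapter interrupts and clear anything pending (except the
	 * transition-to-operational indication) before installing the
	 * shared IRQ handler.
	 */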
5960 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5961 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5962
5963 if (rc) {
5964 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
5965 pdev->irq, rc);
5966 goto cleanup_nolog;
5967 }
5968
5969 spin_lock(&ipr_driver_lock);
5970 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5971 spin_unlock(&ipr_driver_lock);
5972
5973 LEAVE;
5974out:
5975 return rc;
5976
5977cleanup_nolog:
5978 ipr_free_mem(ioa_cfg);
5979cleanup_nomem:
5980 iounmap(ipr_regs);
5981out_release_regions:
5982 pci_release_regions(pdev);
5983out_scsi_host_put:
5984 scsi_host_put(host);
5985out_disable:
5986 pci_disable_device(pdev);
5987 goto out;
5988}
5989
5990/**
5991 * ipr_scan_vsets - Scans for VSET devices
5992 * @ioa_cfg: ioa config struct
5993 *
5994 * Description: Since the VSET resources do not follow SAM (we can have
5995 * sparse LUNs with no LUN 0), we have to scan for these ourselves.
5996 *
5997 * Return value:
5998 * none
5999 **/
6000static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6001{
6002 int target, lun;
6003
6004 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6005 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
6006 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6007}
6008
6009/**
6010 * ipr_initiate_ioa_bringdown - Bring down an adapter
6011 * @ioa_cfg: ioa config struct
6012 * @shutdown_type: shutdown type
6013 *
6014 * Description: This function will initiate bringing down the adapter.
6015 * This consists of issuing an IOA shutdown to the adapter
6016 * to flush the cache, and running BIST.
6017 * If the caller needs to wait on the completion of the reset,
6018 * the caller must sleep on the reset_wait_q.
6019 *
6020 * Return value:
6021 * none
6022 **/
6023static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6024 enum ipr_shutdown_type shutdown_type)
6025{
6026 ENTER;
6027 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6028 ioa_cfg->sdt_state = ABORT_DUMP;
6029 ioa_cfg->reset_retries = 0;
6030 ioa_cfg->in_ioa_bringdown = 1;
6031 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6032 LEAVE;
6033}
6034
6035/**
6036 * __ipr_remove - Remove a single adapter
6037 * @pdev: pci device struct
6038 *
6039 * Adapter hot plug remove entry point.
6040 *
6041 * Return value:
6042 * none
6043 **/
6044static void __ipr_remove(struct pci_dev *pdev)
6045{
6046 unsigned long host_lock_flags = 0;
6047 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6048 ENTER;
6049
6050 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6051 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6052
6053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6054 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6055 flush_scheduled_work();
6056 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6057
6058 spin_lock(&ipr_driver_lock);
6059 list_del(&ioa_cfg->queue);
6060 spin_unlock(&ipr_driver_lock);
6061
6062 if (ioa_cfg->sdt_state == ABORT_DUMP)
6063 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6065
6066 ipr_free_all_resources(ioa_cfg);
6067
6068 LEAVE;
6069}
6070
6071/**
6072 * ipr_remove - IOA hot plug remove entry point
6073 * @pdev: pci device struct
6074 *
6075 * Adapter hot plug remove entry point.
6076 *
6077 * Return value:
6078 * none
6079 **/
6080static void ipr_remove(struct pci_dev *pdev)
6081{
6082 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6083
6084 ENTER;
6085
6086 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6087 &ipr_trace_attr);
6088 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6089 &ipr_dump_attr);
6090 scsi_remove_host(ioa_cfg->host);
6091
6092 __ipr_remove(pdev);
6093
6094 LEAVE;
6095}
6096
6097/**
6098 * ipr_probe - Adapter hot plug add entry point
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
6099 *
6100 * Return value:
6101 * 0 on success / non-zero on failure
6102 **/
6103static int __devinit ipr_probe(struct pci_dev *pdev,
6104 const struct pci_device_id *dev_id)
6105{
6106 struct ipr_ioa_cfg *ioa_cfg;
6107 int rc;
6108
6109 rc = ipr_probe_ioa(pdev, dev_id);
6110
6111 if (rc)
6112 return rc;
6113
6114 ioa_cfg = pci_get_drvdata(pdev);
6115 rc = ipr_probe_ioa_part2(ioa_cfg);
6116
6117 if (rc) {
6118 __ipr_remove(pdev);
6119 return rc;
6120 }
6121
6122 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6123
6124 if (rc) {
6125 __ipr_remove(pdev);
6126 return rc;
6127 }
6128
6129 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6130 &ipr_trace_attr);
6131
6132 if (rc) {
6133 scsi_remove_host(ioa_cfg->host);
6134 __ipr_remove(pdev);
6135 return rc;
6136 }
6137
6138 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6139 &ipr_dump_attr);
6140
6141 if (rc) {
6142 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6143 &ipr_trace_attr);
6144 scsi_remove_host(ioa_cfg->host);
6145 __ipr_remove(pdev);
6146 return rc;
6147 }
6148
6149 scsi_scan_host(ioa_cfg->host);
6150 ipr_scan_vsets(ioa_cfg);
6151 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6152 ioa_cfg->allow_ml_add_del = 1;
6153 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6154 schedule_work(&ioa_cfg->work_q);
6155 return 0;
6156}
6157
6158/**
6159 * ipr_shutdown - Shutdown handler.
6160 * @pdev: pci device struct
6161 *
6162 * This function is invoked upon system shutdown/reboot. It will issue
6163 * a shutdown command to the adapter to flush the write cache.
6164 *
6165 * Return value:
6166 * none
6167 **/
6168static void ipr_shutdown(struct pci_dev *pdev)
6169{
6170 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6171 unsigned long lock_flags = 0;
6172
6173 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6174 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6176 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6177}
6178
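/*
 * PCI IDs claimed by this driver.  The driver_data field selects the
 * matching ipr_chip_cfg[] entry (see ipr_get_chip_cfg() above).
 */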
6179static struct pci_device_id ipr_pci_table[] __devinitdata = {
6180 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6181 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6182 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6183 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6184 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6185 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6186 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6187 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6188 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6190 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6191 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6193 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6194 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6196 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6197 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6199 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6200 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6201 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6202 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6203 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6204 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6205 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6206 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6207 { }
6208};
6209MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6210
6211static struct pci_driver ipr_driver = {
6212 .name = IPR_NAME,
6213 .id_table = ipr_pci_table,
6214 .probe = ipr_probe,
6215 .remove = ipr_remove,
6216 .shutdown = ipr_shutdown,
6217};
6218
6219/**
6220 * ipr_init - Module entry point
6221 *
6222 * Return value:
6223 * 0 on success / negative value on failure
6224 **/
6225static int __init ipr_init(void)
6226{
6227 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6228 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6229
6230 return pci_module_init(&ipr_driver);
6231}
6232
6233/**
6234 * ipr_exit - Module unload
6235 *
6236 * Module unload entry point.
6237 *
6238 * Return value:
6239 * none
6240 **/
6241static void __exit ipr_exit(void)
6242{
6243 pci_unregister_driver(&ipr_driver);
6244}
6245
6246module_init(ipr_init);
6247module_exit(ipr_exit);