]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/pmcraid.c
[SCSI] qla2xxx: Update version number to 8.03.01-k9.
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / pmcraid.c
CommitLineData
89a36810
AR
1/*
2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3 *
729c8456
AR
4 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5 * PMC-Sierra Inc
89a36810
AR
6 *
7 * Copyright (C) 2008, 2009 PMC Sierra Inc
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
22 * USA
23 *
24 */
25#include <linux/fs.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/kernel.h>
30#include <linux/ioport.h>
31#include <linux/delay.h>
32#include <linux/pci.h>
33#include <linux/wait.h>
34#include <linux/spinlock.h>
35#include <linux/sched.h>
36#include <linux/interrupt.h>
37#include <linux/blkdev.h>
38#include <linux/firmware.h>
39#include <linux/module.h>
40#include <linux/moduleparam.h>
41#include <linux/hdreg.h>
42#include <linux/version.h>
43#include <linux/io.h>
44#include <asm/irq.h>
45#include <asm/processor.h>
46#include <linux/libata.h>
47#include <linux/mutex.h>
48#include <scsi/scsi.h>
49#include <scsi/scsi_host.h>
34876402 50#include <scsi/scsi_device.h>
89a36810
AR
51#include <scsi/scsi_tcq.h>
52#include <scsi/scsi_eh.h>
53#include <scsi/scsi_cmnd.h>
54#include <scsi/scsicam.h>
55
56#include "pmcraid.h"
57
58/*
59 * Module configuration parameters
60 */
61static unsigned int pmcraid_debug_log;
62static unsigned int pmcraid_disable_aen;
63static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
64
65/*
66 * Data structures to support multiple adapters by the LLD.
67 * pmcraid_adapter_count - count of configured adapters
68 */
69static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
70
71/*
72 * Supporting user-level control interface through IOCTL commands.
73 * pmcraid_major - major number to use
74 * pmcraid_minor - minor number(s) to use
75 */
76static unsigned int pmcraid_major;
77static struct class *pmcraid_class;
78DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
79
80/*
81 * Module parameters
82 */
729c8456 83MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
89a36810
AR
84MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
85MODULE_LICENSE("GPL");
86MODULE_VERSION(PMCRAID_DRIVER_VERSION);
87
88module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
89MODULE_PARM_DESC(log_level,
90 "Enables firmware error code logging, default :1 high-severity"
91 " errors, 2: all errors including high-severity errors,"
92 " 0: disables logging");
93
94module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
95MODULE_PARM_DESC(debug,
96 "Enable driver verbose message logging. Set 1 to enable."
97 "(default: 0)");
98
99module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
100MODULE_PARM_DESC(disable_aen,
101 "Disable driver aen notifications to apps. Set 1 to disable."
102 "(default: 0)");
103
104/* chip specific constants for PMC MaxRAID controllers (same for
105 * 0x5220 and 0x8010
106 */
107static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
108 {
109 .ioastatus = 0x0,
110 .ioarrin = 0x00040,
111 .mailbox = 0x7FC30,
112 .global_intr_mask = 0x00034,
113 .ioa_host_intr = 0x0009C,
114 .ioa_host_intr_clr = 0x000A0,
115 .ioa_host_mask = 0x7FC28,
116 .ioa_host_mask_clr = 0x7FC28,
117 .host_ioa_intr = 0x00020,
118 .host_ioa_intr_clr = 0x00020,
119 .transop_timeout = 300
120 }
121};
122
123/*
124 * PCI device ids supported by pmcraid driver
125 */
126static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
127 { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
128 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
129 },
130 {}
131};
132
133MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
134
135
136
137/**
138 * pmcraid_slave_alloc - Prepare for commands to a device
139 * @scsi_dev: scsi device struct
140 *
141 * This function is called by mid-layer prior to sending any command to the new
142 * device. Stores resource entry details of the device in scsi_device struct.
143 * Queuecommand uses the resource handle and other details to fill up IOARCB
144 * while sending commands to the device.
145 *
146 * Return value:
147 * 0 on success / -ENXIO if device does not exist
148 */
149static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
150{
151 struct pmcraid_resource_entry *temp, *res = NULL;
152 struct pmcraid_instance *pinstance;
153 u8 target, bus, lun;
154 unsigned long lock_flags;
155 int rc = -ENXIO;
156 pinstance = shost_priv(scsi_dev->host);
157
158 /* Driver exposes VSET and GSCSI resources only; all other device types
159 * are not exposed. Resource list is synchronized using resource lock
160 * so any traversal or modifications to the list should be done inside
161 * this lock
162 */
163 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
164 list_for_each_entry(temp, &pinstance->used_res_q, queue) {
165
729c8456 166 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
89a36810
AR
167 if (RES_IS_VSET(temp->cfg_entry)) {
168 target = temp->cfg_entry.unique_flags1;
729c8456 169 if (target > PMCRAID_MAX_VSET_TARGETS)
89a36810
AR
170 continue;
171 bus = PMCRAID_VSET_BUS_ID;
172 lun = 0;
173 } else if (RES_IS_GSCSI(temp->cfg_entry)) {
174 target = RES_TARGET(temp->cfg_entry.resource_address);
175 bus = PMCRAID_PHYS_BUS_ID;
176 lun = RES_LUN(temp->cfg_entry.resource_address);
177 } else {
178 continue;
179 }
180
181 if (bus == scsi_dev->channel &&
182 target == scsi_dev->id &&
183 lun == scsi_dev->lun) {
184 res = temp;
185 break;
186 }
187 }
188
189 if (res) {
190 res->scsi_dev = scsi_dev;
191 scsi_dev->hostdata = res;
192 res->change_detected = 0;
193 atomic_set(&res->read_failures, 0);
194 atomic_set(&res->write_failures, 0);
195 rc = 0;
196 }
197 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
198 return rc;
199}
200
201/**
202 * pmcraid_slave_configure - Configures a SCSI device
203 * @scsi_dev: scsi device struct
204 *
205 * This fucntion is executed by SCSI mid layer just after a device is first
206 * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
207 * timeout value (default 30s) will be over-written to a higher value (60s)
208 * and max_sectors value will be over-written to 512. It also sets queue depth
209 * to host->cmd_per_lun value
210 *
211 * Return value:
212 * 0 on success
213 */
214static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
215{
216 struct pmcraid_resource_entry *res = scsi_dev->hostdata;
217
218 if (!res)
219 return 0;
220
221 /* LLD exposes VSETs and Enclosure devices only */
222 if (RES_IS_GSCSI(res->cfg_entry) &&
223 scsi_dev->type != TYPE_ENCLOSURE)
224 return -ENXIO;
225
226 pmcraid_info("configuring %x:%x:%x:%x\n",
227 scsi_dev->host->unique_id,
228 scsi_dev->channel,
229 scsi_dev->id,
230 scsi_dev->lun);
231
232 if (RES_IS_GSCSI(res->cfg_entry)) {
233 scsi_dev->allow_restart = 1;
234 } else if (RES_IS_VSET(res->cfg_entry)) {
235 scsi_dev->allow_restart = 1;
236 blk_queue_rq_timeout(scsi_dev->request_queue,
237 PMCRAID_VSET_IO_TIMEOUT);
238 blk_queue_max_sectors(scsi_dev->request_queue,
239 PMCRAID_VSET_MAX_SECTORS);
240 }
241
242 if (scsi_dev->tagged_supported &&
243 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
244 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
245 scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG,
246 scsi_dev->host->cmd_per_lun);
247 } else {
248 scsi_adjust_queue_depth(scsi_dev, 0,
249 scsi_dev->host->cmd_per_lun);
250 }
251
252 return 0;
253}
254
255/**
256 * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
257 *
258 * @scsi_dev: scsi device struct
259 *
260 * This is called by mid-layer before removing a device. Pointer assignments
261 * done in pmcraid_slave_alloc will be reset to NULL here.
262 *
263 * Return value
264 * none
265 */
266static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
267{
268 struct pmcraid_resource_entry *res;
269
270 res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
271
272 if (res)
273 res->scsi_dev = NULL;
274
275 scsi_dev->hostdata = NULL;
276}
277
278/**
279 * pmcraid_change_queue_depth - Change the device's queue depth
280 * @scsi_dev: scsi device struct
281 * @depth: depth to set
e881a172 282 * @reason: calling context
89a36810
AR
283 *
284 * Return value
285 * actual depth set
286 */
e881a172
MC
287static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
288 int reason)
89a36810 289{
e881a172
MC
290 if (reason != SCSI_QDEPTH_DEFAULT)
291 return -EOPNOTSUPP;
292
89a36810
AR
293 if (depth > PMCRAID_MAX_CMD_PER_LUN)
294 depth = PMCRAID_MAX_CMD_PER_LUN;
295
296 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
297
298 return scsi_dev->queue_depth;
299}
300
301/**
302 * pmcraid_change_queue_type - Change the device's queue type
303 * @scsi_dev: scsi device struct
304 * @tag: type of tags to use
305 *
306 * Return value:
307 * actual queue type set
308 */
309static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
310{
311 struct pmcraid_resource_entry *res;
312
313 res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
314
315 if ((res) && scsi_dev->tagged_supported &&
316 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
317 scsi_set_tag_type(scsi_dev, tag);
318
319 if (tag)
320 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
321 else
322 scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
323 } else
324 tag = 0;
325
326 return tag;
327}
328
329
330/**
331 * pmcraid_init_cmdblk - initializes a command block
332 *
333 * @cmd: pointer to struct pmcraid_cmd to be initialized
334 * @index: if >=0 first time initialization; otherwise reinitialization
335 *
336 * Return Value
337 * None
338 */
339void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
340{
341 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
342 dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
343
344 if (index >= 0) {
345 /* first time initialization (called from probe) */
346 u32 ioasa_offset =
347 offsetof(struct pmcraid_control_block, ioasa);
348
349 cmd->index = index;
350 ioarcb->response_handle = cpu_to_le32(index << 2);
351 ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
352 ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
353 ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
354 } else {
355 /* re-initialization of various lengths, called once command is
356 * processed by IOA
357 */
358 memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
359 ioarcb->request_flags0 = 0;
360 ioarcb->request_flags1 = 0;
361 ioarcb->cmd_timeout = 0;
362 ioarcb->ioarcb_bus_addr &= (~0x1FULL);
363 ioarcb->ioadl_bus_addr = 0;
364 ioarcb->ioadl_length = 0;
365 ioarcb->data_transfer_length = 0;
366 ioarcb->add_cmd_param_length = 0;
367 ioarcb->add_cmd_param_offset = 0;
368 cmd->ioa_cb->ioasa.ioasc = 0;
369 cmd->ioa_cb->ioasa.residual_data_length = 0;
370 cmd->u.time_left = 0;
371 }
372
373 cmd->cmd_done = NULL;
374 cmd->scsi_cmd = NULL;
375 cmd->release = 0;
376 cmd->completion_req = 0;
377 cmd->dma_handle = 0;
378 init_timer(&cmd->timer);
379}
380
381/**
382 * pmcraid_reinit_cmdblk - reinitialize a command block
383 *
384 * @cmd: pointer to struct pmcraid_cmd to be reinitialized
385 *
386 * Return Value
387 * None
388 */
389static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
390{
391 pmcraid_init_cmdblk(cmd, -1);
392}
393
394/**
395 * pmcraid_get_free_cmd - get a free cmd block from command block pool
396 * @pinstance: adapter instance structure
397 *
398 * Return Value:
399 * returns pointer to cmd block or NULL if no blocks are available
400 */
401static struct pmcraid_cmd *pmcraid_get_free_cmd(
402 struct pmcraid_instance *pinstance
403)
404{
405 struct pmcraid_cmd *cmd = NULL;
406 unsigned long lock_flags;
407
408 /* free cmd block list is protected by free_pool_lock */
409 spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
410
411 if (!list_empty(&pinstance->free_cmd_pool)) {
412 cmd = list_entry(pinstance->free_cmd_pool.next,
413 struct pmcraid_cmd, free_list);
414 list_del(&cmd->free_list);
415 }
416 spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
417
418 /* Initialize the command block before giving it the caller */
419 if (cmd != NULL)
420 pmcraid_reinit_cmdblk(cmd);
421 return cmd;
422}
423
424/**
425 * pmcraid_return_cmd - return a completed command block back into free pool
426 * @cmd: pointer to the command block
427 *
428 * Return Value:
429 * nothing
430 */
431void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
432{
433 struct pmcraid_instance *pinstance = cmd->drv_inst;
434 unsigned long lock_flags;
435
436 spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
437 list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
438 spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
439}
440
441/**
442 * pmcraid_read_interrupts - reads IOA interrupts
443 *
444 * @pinstance: pointer to adapter instance structure
445 *
446 * Return value
447 * interrupts read from IOA
448 */
449static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
450{
451 return ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
452}
453
454/**
455 * pmcraid_disable_interrupts - Masks and clears all specified interrupts
456 *
457 * @pinstance: pointer to per adapter instance structure
458 * @intrs: interrupts to disable
459 *
460 * Return Value
461 * None
462 */
463static void pmcraid_disable_interrupts(
464 struct pmcraid_instance *pinstance,
465 u32 intrs
466)
467{
468 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
469 u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
470
471 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
472 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
473 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
474 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
475}
476
477/**
478 * pmcraid_enable_interrupts - Enables specified interrupts
479 *
480 * @pinstance: pointer to per adapter instance structure
481 * @intr: interrupts to enable
482 *
483 * Return Value
484 * None
485 */
486static void pmcraid_enable_interrupts(
487 struct pmcraid_instance *pinstance,
488 u32 intrs
489)
490{
491 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
492 u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
493
494 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
495 iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
496 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
497
498 pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
499 ioread32(pinstance->int_regs.global_interrupt_mask_reg),
500 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
501}
502
503/**
504 * pmcraid_reset_type - Determine the required reset type
505 * @pinstance: pointer to adapter instance structure
506 *
507 * IOA requires hard reset if any of the following conditions is true.
508 * 1. If HRRQ valid interrupt is not masked
509 * 2. IOA reset alert doorbell is set
510 * 3. If there are any error interrupts
511 */
512static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
513{
514 u32 mask;
515 u32 intrs;
516 u32 alerts;
517
518 mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
519 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
520 alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
521
522 if ((mask & INTRS_HRRQ_VALID) == 0 ||
523 (alerts & DOORBELL_IOA_RESET_ALERT) ||
524 (intrs & PMCRAID_ERROR_INTERRUPTS)) {
525 pmcraid_info("IOA requires hard reset\n");
526 pinstance->ioa_hard_reset = 1;
527 }
528
529 /* If unit check is active, trigger the dump */
530 if (intrs & INTRS_IOA_UNIT_CHECK)
531 pinstance->ioa_unit_check = 1;
532}
533
534/**
535 * pmcraid_bist_done - completion function for PCI BIST
536 * @cmd: pointer to reset command
537 * Return Value
538 * none
539 */
540
541static void pmcraid_ioa_reset(struct pmcraid_cmd *);
542
543static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
544{
545 struct pmcraid_instance *pinstance = cmd->drv_inst;
546 unsigned long lock_flags;
547 int rc;
548 u16 pci_reg;
549
550 rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
551
552 /* If PCI config space can't be accessed wait for another two secs */
553 if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
554 cmd->u.time_left > 0) {
555 pmcraid_info("BIST not complete, waiting another 2 secs\n");
556 cmd->timer.expires = jiffies + cmd->u.time_left;
557 cmd->u.time_left = 0;
558 cmd->timer.data = (unsigned long)cmd;
559 cmd->timer.function =
560 (void (*)(unsigned long))pmcraid_bist_done;
561 add_timer(&cmd->timer);
562 } else {
563 cmd->u.time_left = 0;
564 pmcraid_info("BIST is complete, proceeding with reset\n");
565 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
566 pmcraid_ioa_reset(cmd);
567 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
568 }
569}
570
571/**
572 * pmcraid_start_bist - starts BIST
573 * @cmd: pointer to reset cmd
574 * Return Value
575 * none
576 */
577static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
578{
579 struct pmcraid_instance *pinstance = cmd->drv_inst;
580 u32 doorbells, intrs;
581
582 /* proceed with bist and wait for 2 seconds */
583 iowrite32(DOORBELL_IOA_START_BIST,
584 pinstance->int_regs.host_ioa_interrupt_reg);
585 doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
586 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
587 pmcraid_info("doorbells after start bist: %x intrs: %x \n",
588 doorbells, intrs);
589
590 cmd->u.time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
591 cmd->timer.data = (unsigned long)cmd;
592 cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
593 cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
594 add_timer(&cmd->timer);
595}
596
597/**
598 * pmcraid_reset_alert_done - completion routine for reset_alert
599 * @cmd: pointer to command block used in reset sequence
600 * Return value
601 * None
602 */
603static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
604{
605 struct pmcraid_instance *pinstance = cmd->drv_inst;
606 u32 status = ioread32(pinstance->ioa_status);
607 unsigned long lock_flags;
608
609 /* if the critical operation in progress bit is set or the wait times
610 * out, invoke reset engine to proceed with hard reset. If there is
611 * some more time to wait, restart the timer
612 */
613 if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
614 cmd->u.time_left <= 0) {
615 pmcraid_info("critical op is reset proceeding with reset\n");
616 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
617 pmcraid_ioa_reset(cmd);
618 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
619 } else {
620 pmcraid_info("critical op is not yet reset waiting again\n");
621 /* restart timer if some more time is available to wait */
622 cmd->u.time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
623 cmd->timer.data = (unsigned long)cmd;
624 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
625 cmd->timer.function =
626 (void (*)(unsigned long))pmcraid_reset_alert_done;
627 add_timer(&cmd->timer);
628 }
629}
630
631/**
632 * pmcraid_reset_alert - alerts IOA for a possible reset
633 * @cmd : command block to be used for reset sequence.
634 *
635 * Return Value
636 * returns 0 if pci config-space is accessible and RESET_DOORBELL is
637 * successfully written to IOA. Returns non-zero in case pci_config_space
638 * is not accessible
639 */
640static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
641{
642 struct pmcraid_instance *pinstance = cmd->drv_inst;
643 u32 doorbells;
644 int rc;
645 u16 pci_reg;
646
647 /* If we are able to access IOA PCI config space, alert IOA that we are
648 * going to reset it soon. This enables IOA to preserv persistent error
649 * data if any. In case memory space is not accessible, proceed with
650 * BIST or slot_reset
651 */
652 rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
653 if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
654
655 /* wait for IOA permission i.e until CRITICAL_OPERATION bit is
656 * reset IOA doesn't generate any interrupts when CRITICAL
657 * OPERATION bit is reset. A timer is started to wait for this
658 * bit to be reset.
659 */
660 cmd->u.time_left = PMCRAID_RESET_TIMEOUT;
661 cmd->timer.data = (unsigned long)cmd;
662 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
663 cmd->timer.function =
664 (void (*)(unsigned long))pmcraid_reset_alert_done;
665 add_timer(&cmd->timer);
666
667 iowrite32(DOORBELL_IOA_RESET_ALERT,
668 pinstance->int_regs.host_ioa_interrupt_reg);
669 doorbells =
670 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
671 pmcraid_info("doorbells after reset alert: %x\n", doorbells);
672 } else {
673 pmcraid_info("PCI config is not accessible starting BIST\n");
674 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
675 pmcraid_start_bist(cmd);
676 }
677}
678
679/**
680 * pmcraid_timeout_handler - Timeout handler for internally generated ops
681 *
682 * @cmd : pointer to command structure, that got timedout
683 *
684 * This function blocks host requests and initiates an adapter reset.
685 *
686 * Return value:
687 * None
688 */
689static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
690{
691 struct pmcraid_instance *pinstance = cmd->drv_inst;
692 unsigned long lock_flags;
693
34876402 694 dev_info(&pinstance->pdev->dev,
89a36810
AR
695 "Adapter being reset due to command timeout.\n");
696
697 /* Command timeouts result in hard reset sequence. The command that got
698 * timed out may be the one used as part of reset sequence. In this
699 * case restart reset sequence using the same command block even if
700 * reset is in progress. Otherwise fail this command and get a free
701 * command block to restart the reset sequence.
702 */
703 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
704 if (!pinstance->ioa_reset_in_progress) {
705 pinstance->ioa_reset_attempts = 0;
706 cmd = pmcraid_get_free_cmd(pinstance);
707
708 /* If we are out of command blocks, just return here itself.
709 * Some other command's timeout handler can do the reset job
710 */
711 if (cmd == NULL) {
712 spin_unlock_irqrestore(pinstance->host->host_lock,
713 lock_flags);
714 pmcraid_err("no free cmnd block for timeout handler\n");
715 return;
716 }
717
718 pinstance->reset_cmd = cmd;
719 pinstance->ioa_reset_in_progress = 1;
720 } else {
721 pmcraid_info("reset is already in progress\n");
722
723 if (pinstance->reset_cmd != cmd) {
724 /* This command should have been given to IOA, this
725 * command will be completed by fail_outstanding_cmds
726 * anyway
727 */
728 pmcraid_err("cmd is pending but reset in progress\n");
729 }
730
731 /* If this command was being used as part of the reset
732 * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
733 * causes fail_outstanding_commands not to return the command
734 * block back to free pool
735 */
736 if (cmd == pinstance->reset_cmd)
737 cmd->cmd_done = pmcraid_ioa_reset;
738
739 }
740
741 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
742 scsi_block_requests(pinstance->host);
743 pmcraid_reset_alert(cmd);
744 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
745}
746
747/**
748 * pmcraid_internal_done - completion routine for internally generated cmds
749 *
750 * @cmd: command that got response from IOA
751 *
752 * Return Value:
753 * none
754 */
755static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
756{
757 pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
758 cmd->ioa_cb->ioarcb.cdb[0],
759 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
760
761 /* Some of the internal commands are sent with callers blocking for the
762 * response. Same will be indicated as part of cmd->completion_req
763 * field. Response path needs to wake up any waiters waiting for cmd
764 * completion if this flag is set.
765 */
766 if (cmd->completion_req) {
767 cmd->completion_req = 0;
768 complete(&cmd->wait_for_completion);
769 }
770
771 /* most of the internal commands are completed by caller itself, so
772 * no need to return the command block back to free pool until we are
773 * required to do so (e.g once done with initialization).
774 */
775 if (cmd->release) {
776 cmd->release = 0;
777 pmcraid_return_cmd(cmd);
778 }
779}
780
781/**
782 * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
783 *
784 * @cmd: command that got response from IOA
785 *
786 * This routine is called after driver re-reads configuration table due to a
787 * lost CCN. It returns the command block back to free pool and schedules
788 * worker thread to add/delete devices into the system.
789 *
790 * Return Value:
791 * none
792 */
793static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
794{
795 pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
796 cmd->ioa_cb->ioarcb.cdb[0],
797 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
798
799 if (cmd->release) {
800 cmd->release = 0;
801 pmcraid_return_cmd(cmd);
802 }
803 pmcraid_info("scheduling worker for config table reinitialization\n");
804 schedule_work(&cmd->drv_inst->worker_q);
805}
806
807/**
808 * pmcraid_erp_done - Process completion of SCSI error response from device
809 * @cmd: pmcraid_command
810 *
811 * This function copies the sense buffer into the scsi_cmd struct and completes
812 * scsi_cmd by calling scsi_done function.
813 *
814 * Return value:
815 * none
816 */
817static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
818{
819 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
820 struct pmcraid_instance *pinstance = cmd->drv_inst;
821 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
822
823 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
824 scsi_cmd->result |= (DID_ERROR << 16);
34876402
AR
825 scmd_printk(KERN_INFO, scsi_cmd,
826 "command CDB[0] = %x failed with IOASC: 0x%08X\n",
827 cmd->ioa_cb->ioarcb.cdb[0], ioasc);
89a36810
AR
828 }
829
830 /* if we had allocated sense buffers for request sense, copy the sense
831 * release the buffers
832 */
833 if (cmd->sense_buffer != NULL) {
834 memcpy(scsi_cmd->sense_buffer,
835 cmd->sense_buffer,
836 SCSI_SENSE_BUFFERSIZE);
837 pci_free_consistent(pinstance->pdev,
838 SCSI_SENSE_BUFFERSIZE,
839 cmd->sense_buffer, cmd->sense_buffer_dma);
840 cmd->sense_buffer = NULL;
841 cmd->sense_buffer_dma = 0;
842 }
843
844 scsi_dma_unmap(scsi_cmd);
845 pmcraid_return_cmd(cmd);
846 scsi_cmd->scsi_done(scsi_cmd);
847}
848
849/**
850 * pmcraid_fire_command - sends an IOA command to adapter
851 *
852 * This function adds the given block into pending command list
853 * and returns without waiting
854 *
855 * @cmd : command to be sent to the device
856 *
857 * Return Value
858 * None
859 */
860static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
861{
862 struct pmcraid_instance *pinstance = cmd->drv_inst;
863 unsigned long lock_flags;
864
865 /* Add this command block to pending cmd pool. We do this prior to
866 * writting IOARCB to ioarrin because IOA might complete the command
867 * by the time we are about to add it to the list. Response handler
868 * (isr/tasklet) looks for cmb block in the pending pending list.
869 */
870 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
871 list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
872 spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
873 atomic_inc(&pinstance->outstanding_cmds);
874
875 /* driver writes lower 32-bit value of IOARCB address only */
876 mb();
877 iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
878 pinstance->ioarrin);
879}
880
881/**
882 * pmcraid_send_cmd - fires a command to IOA
883 *
884 * This function also sets up timeout function, and command completion
885 * function
886 *
887 * @cmd: pointer to the command block to be fired to IOA
888 * @cmd_done: command completion function, called once IOA responds
889 * @timeout: timeout to wait for this command completion
890 * @timeout_func: timeout handler
891 *
892 * Return value
893 * none
894 */
895static void pmcraid_send_cmd(
896 struct pmcraid_cmd *cmd,
897 void (*cmd_done) (struct pmcraid_cmd *),
898 unsigned long timeout,
899 void (*timeout_func) (struct pmcraid_cmd *)
900)
901{
902 /* initialize done function */
903 cmd->cmd_done = cmd_done;
904
905 if (timeout_func) {
906 /* setup timeout handler */
907 cmd->timer.data = (unsigned long)cmd;
908 cmd->timer.expires = jiffies + timeout;
909 cmd->timer.function = (void (*)(unsigned long))timeout_func;
910 add_timer(&cmd->timer);
911 }
912
913 /* fire the command to IOA */
914 _pmcraid_fire_command(cmd);
915}
916
917/**
918 * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
919 *
920 * @cmd: pointer to the command block used as part of reset sequence
921 *
922 * Return Value
923 * None
924 */
925static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
926{
927 pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
928 cmd->ioa_cb->ioarcb.cdb[0],
929 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
930
931 /* Note that commands sent during reset require next command to be sent
932 * to IOA. Hence reinit the done function as well as timeout function
933 */
934 pmcraid_reinit_cmdblk(cmd);
935 cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
936 cmd->ioa_cb->ioarcb.resource_handle =
937 cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
938 cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
939 cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
940
941 /* fire shutdown command to hardware. */
942 pmcraid_info("firing normal shutdown command (%d) to IOA\n",
943 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
944
945 pmcraid_send_cmd(cmd, pmcraid_ioa_reset,
946 PMCRAID_SHUTDOWN_TIMEOUT,
947 pmcraid_timeout_handler);
948}
949
950/**
951 * pmcraid_identify_hrrq - registers host rrq buffers with IOA
952 * @cmd: pointer to command block to be used for identify hrrq
953 *
954 * Return Value
955 * 0 in case of success, otherwise non-zero failure code
956 */
957
958static void pmcraid_querycfg(struct pmcraid_cmd *);
959
960static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
961{
962 struct pmcraid_instance *pinstance = cmd->drv_inst;
963 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
964 int index = 0;
965 __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
966 u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
967
968 pmcraid_reinit_cmdblk(cmd);
969
970 /* Initialize ioarcb */
971 ioarcb->request_type = REQ_TYPE_IOACMD;
972 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
973
974 /* initialize the hrrq number where IOA will respond to this command */
975 ioarcb->hrrq_id = index;
976 ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
977 ioarcb->cdb[1] = index;
978
979 /* IOA expects 64-bit pci address to be written in B.E format
980 * (i.e cdb[2]=MSByte..cdb[9]=LSB.
981 */
982 pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb => %llx:%llx\n",
983 hrrq_addr, ioarcb->ioarcb_bus_addr);
984
985 memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
986 memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
987
988 /* Subsequent commands require HRRQ identification to be successful.
989 * Note that this gets called even during reset from SCSI mid-layer
990 * or tasklet
991 */
992 pmcraid_send_cmd(cmd, pmcraid_querycfg,
993 PMCRAID_INTERNAL_TIMEOUT,
994 pmcraid_timeout_handler);
995}
996
997static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
998static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
999
1000/**
1001 * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA
1002 *
1003 * @cmd: initialized command block pointer
1004 *
1005 * Return Value
1006 * none
1007 */
1008static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
1009{
1010 if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
1011 atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
1012 else
1013 atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
1014
1015 pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
1016}
1017
1018/**
1019 * pmcraid_init_hcam - send an initialized command block(HCAM) to IOA
1020 *
1021 * @pinstance: pointer to adapter instance structure
1022 * @type: HCAM type
1023 *
1024 * Return Value
1025 * pointer to initialized pmcraid_cmd structure or NULL
1026 */
1027static struct pmcraid_cmd *pmcraid_init_hcam
1028(
1029 struct pmcraid_instance *pinstance,
1030 u8 type
1031)
1032{
1033 struct pmcraid_cmd *cmd;
1034 struct pmcraid_ioarcb *ioarcb;
1035 struct pmcraid_ioadl_desc *ioadl;
1036 struct pmcraid_hostrcb *hcam;
1037 void (*cmd_done) (struct pmcraid_cmd *);
1038 dma_addr_t dma;
1039 int rcb_size;
1040
1041 cmd = pmcraid_get_free_cmd(pinstance);
1042
1043 if (!cmd) {
1044 pmcraid_err("no free command blocks for hcam\n");
1045 return cmd;
1046 }
1047
1048 if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
1049 rcb_size = sizeof(struct pmcraid_hcam_ccn);
1050 cmd_done = pmcraid_process_ccn;
1051 dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
1052 hcam = &pinstance->ccn;
1053 } else {
1054 rcb_size = sizeof(struct pmcraid_hcam_ldn);
1055 cmd_done = pmcraid_process_ldn;
1056 dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
1057 hcam = &pinstance->ldn;
1058 }
1059
1060 /* initialize command pointer used for HCAM registration */
1061 hcam->cmd = cmd;
1062
1063 ioarcb = &cmd->ioa_cb->ioarcb;
1064 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1065 offsetof(struct pmcraid_ioarcb,
1066 add_data.u.ioadl[0]));
1067 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1068 ioadl = ioarcb->add_data.u.ioadl;
1069
1070 /* Initialize ioarcb */
1071 ioarcb->request_type = REQ_TYPE_HCAM;
1072 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1073 ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
1074 ioarcb->cdb[1] = type;
1075 ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
1076 ioarcb->cdb[8] = (rcb_size) & 0xFF;
1077
1078 ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
1079
88197966 1080 ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
89a36810
AR
1081 ioadl[0].data_len = cpu_to_le32(rcb_size);
1082 ioadl[0].address = cpu_to_le32(dma);
1083
1084 cmd->cmd_done = cmd_done;
1085 return cmd;
1086}
1087
1088/**
1089 * pmcraid_send_hcam - Send an HCAM to IOA
1090 * @pinstance: ioa config struct
1091 * @type: HCAM type
1092 *
1093 * This function will send a Host Controlled Async command to IOA.
1094 *
1095 * Return value:
1096 * none
1097 */
1098static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
1099{
1100 struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
1101 pmcraid_send_hcam_cmd(cmd);
1102}
1103
1104
1105/**
1106 * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
1107 *
1108 * @cmd: pointer to cmd that is used as cancelling command
1109 * @cmd_to_cancel: pointer to the command that needs to be cancelled
1110 */
1111static void pmcraid_prepare_cancel_cmd(
1112 struct pmcraid_cmd *cmd,
1113 struct pmcraid_cmd *cmd_to_cancel
1114)
1115{
1116 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1117 __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
1118
1119 /* Get the resource handle to where the command to be aborted has been
1120 * sent.
1121 */
1122 ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
1123 ioarcb->request_type = REQ_TYPE_IOACMD;
1124 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
1125 ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
1126
1127 /* IOARCB address of the command to be cancelled is given in
1128 * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in
1129 * IOARCB address are not masked.
1130 */
1131 ioarcb_addr = cpu_to_be64(ioarcb_addr);
1132 memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
1133}
1134
1135/**
1136 * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
1137 *
1138 * @cmd: command to be used as cancelling command
1139 * @type: HCAM type
1140 * @cmd_done: op done function for the cancelling command
1141 */
1142static void pmcraid_cancel_hcam(
1143 struct pmcraid_cmd *cmd,
1144 u8 type,
1145 void (*cmd_done) (struct pmcraid_cmd *)
1146)
1147{
1148 struct pmcraid_instance *pinstance;
1149 struct pmcraid_hostrcb *hcam;
1150
1151 pinstance = cmd->drv_inst;
1152 hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
1153 &pinstance->ldn : &pinstance->ccn;
1154
1155 /* prepare for cancelling previous hcam command. If the HCAM is
1156 * currently not pending with IOA, we would have hcam->cmd as non-null
1157 */
1158 if (hcam->cmd == NULL)
1159 return;
1160
1161 pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
1162
1163 /* writing to IOARRIN must be protected by host_lock, as mid-layer
1164 * schedule queuecommand while we are doing this
1165 */
1166 pmcraid_send_cmd(cmd, cmd_done,
1167 PMCRAID_INTERNAL_TIMEOUT,
1168 pmcraid_timeout_handler);
1169}
1170
1171/**
1172 * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
1173 *
1174 * @cmd: command block to be used for cancelling the HCAM
1175 */
1176static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
1177{
1178 pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
1179 cmd->ioa_cb->ioarcb.cdb[0],
1180 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
1181
1182 pmcraid_reinit_cmdblk(cmd);
1183
1184 pmcraid_cancel_hcam(cmd,
1185 PMCRAID_HCAM_CODE_CONFIG_CHANGE,
1186 pmcraid_ioa_shutdown);
1187}
1188
1189/**
1190 * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
1191 *
1192 * @cmd: command block to be used for cancelling the HCAM
1193 */
1194static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
1195{
1196 pmcraid_cancel_hcam(cmd,
1197 PMCRAID_HCAM_CODE_LOG_DATA,
1198 pmcraid_cancel_ccn);
1199}
1200
1201/**
1202 * pmcraid_expose_resource - check if the resource can be exposed to OS
1203 *
1204 * @cfgte: pointer to configuration table entry of the resource
1205 *
1206 * Return value:
1207 * true if resource can be added to midlayer, false(0) otherwise
1208 */
1209static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
1210{
1211 int retval = 0;
1212
1213 if (cfgte->resource_type == RES_TYPE_VSET)
729c8456 1214 retval = ((cfgte->unique_flags1 & 0x80) == 0);
89a36810
AR
1215 else if (cfgte->resource_type == RES_TYPE_GSCSI)
1216 retval = (RES_BUS(cfgte->resource_address) !=
1217 PMCRAID_VIRTUAL_ENCL_BUS_ID);
1218 return retval;
1219}
1220
1221/* attributes supported by pmcraid_event_family */
1222enum {
1223 PMCRAID_AEN_ATTR_UNSPEC,
1224 PMCRAID_AEN_ATTR_EVENT,
1225 __PMCRAID_AEN_ATTR_MAX,
1226};
1227#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
1228
1229/* commands supported by pmcraid_event_family */
1230enum {
1231 PMCRAID_AEN_CMD_UNSPEC,
1232 PMCRAID_AEN_CMD_EVENT,
1233 __PMCRAID_AEN_CMD_MAX,
1234};
1235#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
1236
1237static struct genl_family pmcraid_event_family = {
1238 .id = GENL_ID_GENERATE,
1239 .name = "pmcraid",
1240 .version = 1,
1241 .maxattr = PMCRAID_AEN_ATTR_MAX
1242};
1243
1244/**
1245 * pmcraid_netlink_init - registers pmcraid_event_family
1246 *
1247 * Return value:
1248 * 0 if the pmcraid_event_family is successfully registered
1249 * with netlink generic, non-zero otherwise
1250 */
1251static int pmcraid_netlink_init(void)
1252{
1253 int result;
1254
1255 result = genl_register_family(&pmcraid_event_family);
1256
1257 if (result)
1258 return result;
1259
1260 pmcraid_info("registered NETLINK GENERIC group: %d\n",
1261 pmcraid_event_family.id);
1262
1263 return result;
1264}
1265
1266/**
1267 * pmcraid_netlink_release - unregisters pmcraid_event_family
1268 *
1269 * Return value:
1270 * none
1271 */
1272static void pmcraid_netlink_release(void)
1273{
1274 genl_unregister_family(&pmcraid_event_family);
1275}
1276
1277/**
1278 * pmcraid_notify_aen - sends event msg to user space application
1279 * @pinstance: pointer to adapter instance structure
1280 * @type: HCAM type
1281 *
1282 * Return value:
1283 * 0 if success, error value in case of any failure.
1284 */
1285static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1286{
1287 struct sk_buff *skb;
1288 struct pmcraid_aen_msg *aen_msg;
1289 void *msg_header;
1290 int data_size, total_size;
1291 int result;
1292
1293
1294 if (type == PMCRAID_HCAM_CODE_LOG_DATA) {
1295 aen_msg = pinstance->ldn.msg;
1296 data_size = pinstance->ldn.hcam->data_len;
1297 } else {
1298 aen_msg = pinstance->ccn.msg;
1299 data_size = pinstance->ccn.hcam->data_len;
1300 }
1301
1302 data_size += sizeof(struct pmcraid_hcam_hdr);
1303 aen_msg->hostno = (pinstance->host->unique_id << 16 |
1304 MINOR(pinstance->cdev.dev));
1305 aen_msg->length = data_size;
1306 data_size += sizeof(*aen_msg);
1307
1308 total_size = nla_total_size(data_size);
1309 skb = genlmsg_new(total_size, GFP_ATOMIC);
1310
1311
1312 if (!skb) {
1313 pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
1314 total_size);
1315 return -ENOMEM;
1316 }
1317
1318 /* add the genetlink message header */
1319 msg_header = genlmsg_put(skb, 0, 0,
1320 &pmcraid_event_family, 0,
1321 PMCRAID_AEN_CMD_EVENT);
1322 if (!msg_header) {
1323 pmcraid_err("failed to copy command details\n");
1324 nlmsg_free(skb);
1325 return -ENOMEM;
1326 }
1327
1328 result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
1329
1330 if (result) {
1331 pmcraid_err("failed to copy AEN attribute data \n");
1332 nlmsg_free(skb);
1333 return -EINVAL;
1334 }
1335
1336 /* send genetlink multicast message to notify appplications */
1337 result = genlmsg_end(skb, msg_header);
1338
1339 if (result < 0) {
1340 pmcraid_err("genlmsg_end failed\n");
1341 nlmsg_free(skb);
1342 return result;
1343 }
1344
1345 result =
1346 genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC);
1347
1348 /* If there are no listeners, genlmsg_multicast may return non-zero
1349 * value.
1350 */
1351 if (result)
1352 pmcraid_info("failed to send %s event message %x!\n",
1353 type == PMCRAID_HCAM_CODE_LOG_DATA ? "LDN" : "CCN",
1354 result);
1355 return result;
1356}
1357
1358/**
1359 * pmcraid_handle_config_change - Handle a config change from the adapter
1360 * @pinstance: pointer to per adapter instance structure
1361 *
1362 * Return value:
1363 * none
1364 */
729c8456 1365
89a36810
AR
1366static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1367{
1368 struct pmcraid_config_table_entry *cfg_entry;
1369 struct pmcraid_hcam_ccn *ccn_hcam;
1370 struct pmcraid_cmd *cmd;
1371 struct pmcraid_cmd *cfgcmd;
1372 struct pmcraid_resource_entry *res = NULL;
89a36810
AR
1373 unsigned long lock_flags;
1374 unsigned long host_lock_flags;
729c8456
AR
1375 u32 new_entry = 1;
1376 u32 hidden_entry = 0;
89a36810
AR
1377 int rc;
1378
1379 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
1380 cfg_entry = &ccn_hcam->cfg_entry;
1381
1382 pmcraid_info
1383 ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
1384 pinstance->ccn.hcam->ilid,
1385 pinstance->ccn.hcam->op_code,
1386 pinstance->ccn.hcam->notification_type,
1387 pinstance->ccn.hcam->notification_lost,
1388 pinstance->ccn.hcam->flags,
1389 pinstance->host->unique_id,
1390 RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
1391 (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
1392 RES_BUS(cfg_entry->resource_address)),
1393 RES_IS_VSET(*cfg_entry) ? cfg_entry->unique_flags1 :
1394 RES_TARGET(cfg_entry->resource_address),
1395 RES_LUN(cfg_entry->resource_address));
1396
1397
1398 /* If this HCAM indicates a lost notification, read the config table */
1399 if (pinstance->ccn.hcam->notification_lost) {
1400 cfgcmd = pmcraid_get_free_cmd(pinstance);
1401 if (cfgcmd) {
1402 pmcraid_info("lost CCN, reading config table\b");
1403 pinstance->reinit_cfg_table = 1;
1404 pmcraid_querycfg(cfgcmd);
1405 } else {
1406 pmcraid_err("lost CCN, no free cmd for querycfg\n");
1407 }
1408 goto out_notify_apps;
1409 }
1410
1411 /* If this resource is not going to be added to mid-layer, just notify
729c8456
AR
1412 * applications and return. If this notification is about hiding a VSET
1413 * resource, check if it was exposed already.
89a36810 1414 */
729c8456
AR
1415 if (pinstance->ccn.hcam->notification_type ==
1416 NOTIFICATION_TYPE_ENTRY_CHANGED &&
1417 cfg_entry->resource_type == RES_TYPE_VSET &&
1418 cfg_entry->unique_flags1 & 0x80) {
1419 hidden_entry = 1;
1420 } else if (!pmcraid_expose_resource(cfg_entry))
89a36810
AR
1421 goto out_notify_apps;
1422
1423 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
1424 list_for_each_entry(res, &pinstance->used_res_q, queue) {
1425 rc = memcmp(&res->cfg_entry.resource_address,
1426 &cfg_entry->resource_address,
1427 sizeof(cfg_entry->resource_address));
1428 if (!rc) {
1429 new_entry = 0;
1430 break;
1431 }
1432 }
1433
1434 if (new_entry) {
1435
729c8456
AR
1436 if (hidden_entry) {
1437 spin_unlock_irqrestore(&pinstance->resource_lock,
1438 lock_flags);
1439 goto out_notify_apps;
1440 }
1441
89a36810
AR
1442 /* If there are more number of resources than what driver can
1443 * manage, do not notify the applications about the CCN. Just
1444 * ignore this notifications and re-register the same HCAM
1445 */
1446 if (list_empty(&pinstance->free_res_q)) {
1447 spin_unlock_irqrestore(&pinstance->resource_lock,
1448 lock_flags);
1449 pmcraid_err("too many resources attached\n");
1450 spin_lock_irqsave(pinstance->host->host_lock,
1451 host_lock_flags);
1452 pmcraid_send_hcam(pinstance,
1453 PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1454 spin_unlock_irqrestore(pinstance->host->host_lock,
1455 host_lock_flags);
1456 return;
1457 }
1458
1459 res = list_entry(pinstance->free_res_q.next,
1460 struct pmcraid_resource_entry, queue);
1461
1462 list_del(&res->queue);
1463 res->scsi_dev = NULL;
1464 res->reset_progress = 0;
1465 list_add_tail(&res->queue, &pinstance->used_res_q);
1466 }
1467
1468 memcpy(&res->cfg_entry, cfg_entry,
1469 sizeof(struct pmcraid_config_table_entry));
1470
1471 if (pinstance->ccn.hcam->notification_type ==
729c8456 1472 NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
89a36810 1473 if (res->scsi_dev) {
729c8456 1474 res->cfg_entry.unique_flags1 &= 0x7F;
89a36810
AR
1475 res->change_detected = RES_CHANGE_DEL;
1476 res->cfg_entry.resource_handle =
1477 PMCRAID_INVALID_RES_HANDLE;
1478 schedule_work(&pinstance->worker_q);
1479 } else {
1480 /* This may be one of the non-exposed resources */
1481 list_move_tail(&res->queue, &pinstance->free_res_q);
1482 }
1483 } else if (!res->scsi_dev) {
1484 res->change_detected = RES_CHANGE_ADD;
1485 schedule_work(&pinstance->worker_q);
1486 }
1487 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
1488
1489out_notify_apps:
1490
1491 /* Notify configuration changes to registered applications.*/
1492 if (!pmcraid_disable_aen)
1493 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1494
1495 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1496 if (cmd)
1497 pmcraid_send_hcam_cmd(cmd);
1498}
1499
1500/**
1501 * pmcraid_get_error_info - return error string for an ioasc
1502 * @ioasc: ioasc code
1503 * Return Value
1504 * none
1505 */
1506static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
1507{
1508 int i;
1509 for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
1510 if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
1511 return &pmcraid_ioasc_error_table[i];
1512 }
1513 return NULL;
1514}
1515
1516/**
1517 * pmcraid_ioasc_logger - log IOASC information based user-settings
1518 * @ioasc: ioasc code
1519 * @cmd: pointer to command that resulted in 'ioasc'
1520 */
1521void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
1522{
1523 struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
1524
1525 if (error_info == NULL ||
1526 cmd->drv_inst->current_log_level < error_info->log_level)
1527 return;
1528
1529 /* log the error string */
1530 pmcraid_err("cmd [%d] for resource %x failed with %x(%s)\n",
1531 cmd->ioa_cb->ioarcb.cdb[0],
1532 cmd->ioa_cb->ioarcb.resource_handle,
1533 le32_to_cpu(ioasc), error_info->error_string);
1534}
1535
1536/**
1537 * pmcraid_handle_error_log - Handle a config change (error log) from the IOA
1538 *
1539 * @pinstance: pointer to per adapter instance structure
1540 *
1541 * Return value:
1542 * none
1543 */
1544static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
1545{
1546 struct pmcraid_hcam_ldn *hcam_ldn;
1547 u32 ioasc;
1548
1549 hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1550
1551 pmcraid_info
1552 ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
1553 pinstance->ldn.hcam->ilid,
1554 pinstance->ldn.hcam->op_code,
1555 pinstance->ldn.hcam->notification_type,
1556 pinstance->ldn.hcam->notification_lost,
1557 pinstance->ldn.hcam->flags,
1558 pinstance->ldn.hcam->overlay_id);
1559
1560 /* log only the errors, no need to log informational log entries */
1561 if (pinstance->ldn.hcam->notification_type !=
1562 NOTIFICATION_TYPE_ERROR_LOG)
1563 return;
1564
1565 if (pinstance->ldn.hcam->notification_lost ==
1566 HOSTRCB_NOTIFICATIONS_LOST)
34876402 1567 dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
89a36810
AR
1568
1569 ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
1570
1571 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
1572 ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
34876402 1573 dev_info(&pinstance->pdev->dev,
89a36810
AR
1574 "UnitAttention due to IOA Bus Reset\n");
1575 scsi_report_bus_reset(
1576 pinstance->host,
1577 RES_BUS(hcam_ldn->error_log.fd_ra));
1578 }
1579
1580 return;
1581}
1582
1583/**
1584 * pmcraid_process_ccn - Op done function for a CCN.
1585 * @cmd: pointer to command struct
1586 *
1587 * This function is the op done function for a configuration
1588 * change notification
1589 *
1590 * Return value:
1591 * none
1592 */
1593static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
1594{
1595 struct pmcraid_instance *pinstance = cmd->drv_inst;
1596 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1597 unsigned long lock_flags;
1598
1599 pinstance->ccn.cmd = NULL;
1600 pmcraid_return_cmd(cmd);
1601
1602 /* If driver initiated IOA reset happened while this hcam was pending
1603 * with IOA, or IOA bringdown sequence is in progress, no need to
1604 * re-register the hcam
1605 */
1606 if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1607 atomic_read(&pinstance->ccn.ignore) == 1) {
1608 return;
1609 } else if (ioasc) {
34876402 1610 dev_info(&pinstance->pdev->dev,
89a36810
AR
1611 "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
1612 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1613 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1614 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1615 } else {
1616 pmcraid_handle_config_change(pinstance);
1617 }
1618}
1619
1620/**
1621 * pmcraid_process_ldn - op done function for an LDN
1622 * @cmd: pointer to command block
1623 *
1624 * Return value
1625 * none
1626 */
1627static void pmcraid_initiate_reset(struct pmcraid_instance *);
1628
1629static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1630{
1631 struct pmcraid_instance *pinstance = cmd->drv_inst;
1632 struct pmcraid_hcam_ldn *ldn_hcam =
1633 (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1634 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1635 u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
1636 unsigned long lock_flags;
1637
1638 /* return the command block back to freepool */
1639 pinstance->ldn.cmd = NULL;
1640 pmcraid_return_cmd(cmd);
1641
1642 /* If driver initiated IOA reset happened while this hcam was pending
1643 * with IOA, no need to re-register the hcam as reset engine will do it
1644 * once reset sequence is complete
1645 */
1646 if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1647 atomic_read(&pinstance->ccn.ignore) == 1) {
1648 return;
1649 } else if (!ioasc) {
1650 pmcraid_handle_error_log(pinstance);
1651 if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
1652 spin_lock_irqsave(pinstance->host->host_lock,
1653 lock_flags);
1654 pmcraid_initiate_reset(pinstance);
1655 spin_unlock_irqrestore(pinstance->host->host_lock,
1656 lock_flags);
1657 return;
1658 }
1659 } else {
34876402 1660 dev_info(&pinstance->pdev->dev,
89a36810
AR
1661 "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
1662 }
1663 /* send netlink message for HCAM notification if enabled */
1664 if (!pmcraid_disable_aen)
1665 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1666
1667 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1668 if (cmd)
1669 pmcraid_send_hcam_cmd(cmd);
1670}
1671
1672/**
1673 * pmcraid_register_hcams - register HCAMs for CCN and LDN
1674 *
1675 * @pinstance: pointer per adapter instance structure
1676 *
1677 * Return Value
1678 * none
1679 */
1680static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
1681{
1682 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1683 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1684}
1685
1686/**
1687 * pmcraid_unregister_hcams - cancel HCAMs registered already
1688 * @cmd: pointer to command used as part of reset sequence
1689 */
1690static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
1691{
1692 struct pmcraid_instance *pinstance = cmd->drv_inst;
1693
1694 /* During IOA bringdown, HCAM gets fired and tasklet proceeds with
1695 * handling hcam response though it is not necessary. In order to
1696 * prevent this, set 'ignore', so that bring-down sequence doesn't
1697 * re-send any more hcams
1698 */
1699 atomic_set(&pinstance->ccn.ignore, 1);
1700 atomic_set(&pinstance->ldn.ignore, 1);
1701
1702 /* If adapter reset was forced as part of runtime reset sequence,
1703 * start the reset sequence.
1704 */
1705 if (pinstance->force_ioa_reset && !pinstance->ioa_bringdown) {
1706 pinstance->force_ioa_reset = 0;
1707 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1708 pmcraid_reset_alert(cmd);
1709 return;
1710 }
1711
1712 /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
1713 * one after the other. So CCN cancellation will be triggered by
1714 * pmcraid_cancel_ldn itself.
1715 */
1716 pmcraid_cancel_ldn(cmd);
1717}
1718
1719/**
1720 * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
1721 * @pinstance: pointer to adapter instance structure
1722 * Return Value
1723 * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
1724 */
1725static void pmcraid_reinit_buffers(struct pmcraid_instance *);
1726
1727static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
1728{
1729 u32 intrs;
1730
1731 pmcraid_reinit_buffers(pinstance);
1732 intrs = pmcraid_read_interrupts(pinstance);
1733
1734 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
1735
1736 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
1737 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1738 pinstance->int_regs.ioa_host_interrupt_mask_reg);
1739 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1740 pinstance->int_regs.ioa_host_interrupt_clr_reg);
1741 return 1;
1742 } else {
1743 return 0;
1744 }
1745}
1746
1747/**
1748 * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
1749 * @cmd : pointer to reset command block
1750 *
1751 * Return Value
1752 * none
1753 */
1754static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
1755{
1756 struct pmcraid_instance *pinstance = cmd->drv_inst;
1757 u32 int_reg;
1758 u32 doorbell;
1759
1760 /* There will be an interrupt when Transition to Operational bit is
1761 * set so tasklet would execute next reset task. The timeout handler
1762 * would re-initiate a reset
1763 */
1764 cmd->cmd_done = pmcraid_ioa_reset;
1765 cmd->timer.data = (unsigned long)cmd;
1766 cmd->timer.expires = jiffies +
1767 msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
1768 cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
1769
1770 if (!timer_pending(&cmd->timer))
1771 add_timer(&cmd->timer);
1772
1773 /* Enable destructive diagnostics on IOA if it is not yet in
1774 * operational state
1775 */
1776 doorbell = DOORBELL_RUNTIME_RESET |
1777 DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
1778
1779 iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
1780 int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
1781 pmcraid_info("Waiting for IOA to become operational %x:%x\n",
1782 ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
1783 int_reg);
1784}
1785
1786/**
1787 * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
1788 *
1789 * @pinstance: pointer to adapter instance structure
1790 *
1791 * Return Value
1792 * none
1793 */
1794static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
1795{
1796 pmcraid_info("%s is not yet implemented\n", __func__);
1797}
1798
1799/**
1800 * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
1801 * @pinstance: pointer to adapter instance structure
1802 *
1803 * This function fails all outstanding ops. If they are submitted to IOA
1804 * already, it sends cancel all messages if IOA is still accepting IOARCBs,
1805 * otherwise just completes the commands and returns the cmd blocks to free
1806 * pool.
1807 *
1808 * Return value:
1809 * none
1810 */
1811static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
1812{
1813 struct pmcraid_cmd *cmd, *temp;
1814 unsigned long lock_flags;
1815
1816 /* pending command list is protected by pending_pool_lock. Its
1817 * traversal must be done as within this lock
1818 */
1819 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
1820 list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
1821 free_list) {
1822 list_del(&cmd->free_list);
1823 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
1824 lock_flags);
1825 cmd->ioa_cb->ioasa.ioasc =
1826 cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
1827 cmd->ioa_cb->ioasa.ilid =
1828 cpu_to_be32(PMCRAID_DRIVER_ILID);
1829
1830 /* In case the command timer is still running */
1831 del_timer(&cmd->timer);
1832
1833 /* If this is an IO command, complete it by invoking scsi_done
1834 * function. If this is one of the internal commands other
1835 * than pmcraid_ioa_reset and HCAM commands invoke cmd_done to
1836 * complete it
1837 */
1838 if (cmd->scsi_cmd) {
1839
1840 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
1841 __le32 resp = cmd->ioa_cb->ioarcb.response_handle;
1842
1843 scsi_cmd->result |= DID_ERROR << 16;
1844
1845 scsi_dma_unmap(scsi_cmd);
1846 pmcraid_return_cmd(cmd);
1847
89a36810
AR
1848 pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
1849 le32_to_cpu(resp) >> 2,
1850 cmd->ioa_cb->ioarcb.cdb[0],
1851 scsi_cmd->result);
1852 scsi_cmd->scsi_done(scsi_cmd);
1853 } else if (cmd->cmd_done == pmcraid_internal_done ||
1854 cmd->cmd_done == pmcraid_erp_done) {
1855 cmd->cmd_done(cmd);
1856 } else if (cmd->cmd_done != pmcraid_ioa_reset) {
1857 pmcraid_return_cmd(cmd);
1858 }
1859
1860 atomic_dec(&pinstance->outstanding_cmds);
1861 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
1862 }
1863
1864 spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
1865}
1866
1867/**
1868 * pmcraid_ioa_reset - Implementation of IOA reset logic
1869 *
1870 * @cmd: pointer to the cmd block to be used for entire reset process
1871 *
1872 * This function executes most of the steps required for IOA reset. This gets
1873 * called by user threads (modprobe/insmod/rmmod) timer, tasklet and midlayer's
1874 * 'eh_' thread. Access to variables used for controling the reset sequence is
1875 * synchronized using host lock. Various functions called during reset process
1876 * would make use of a single command block, pointer to which is also stored in
1877 * adapter instance structure.
1878 *
1879 * Return Value
1880 * None
1881 */
1882static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
1883{
1884 struct pmcraid_instance *pinstance = cmd->drv_inst;
1885 u8 reset_complete = 0;
1886
1887 pinstance->ioa_reset_in_progress = 1;
1888
1889 if (pinstance->reset_cmd != cmd) {
1890 pmcraid_err("reset is called with different command block\n");
1891 pinstance->reset_cmd = cmd;
1892 }
1893
1894 pmcraid_info("reset_engine: state = %d, command = %p\n",
1895 pinstance->ioa_state, cmd);
1896
1897 switch (pinstance->ioa_state) {
1898
1899 case IOA_STATE_DEAD:
1900 /* If IOA is offline, whatever may be the reset reason, just
1901 * return. callers might be waiting on the reset wait_q, wake
1902 * up them
1903 */
1904 pmcraid_err("IOA is offline no reset is possible\n");
1905 reset_complete = 1;
1906 break;
1907
1908 case IOA_STATE_IN_BRINGDOWN:
1909 /* we enter here, once ioa shutdown command is processed by IOA
1910 * Alert IOA for a possible reset. If reset alert fails, IOA
1911 * goes through hard-reset
1912 */
1913 pmcraid_disable_interrupts(pinstance, ~0);
1914 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1915 pmcraid_reset_alert(cmd);
1916 break;
1917
1918 case IOA_STATE_UNKNOWN:
1919 /* We may be called during probe or resume. Some pre-processing
1920 * is required for prior to reset
1921 */
1922 scsi_block_requests(pinstance->host);
1923
1924 /* If asked to reset while IOA was processing responses or
1925 * there are any error responses then IOA may require
1926 * hard-reset.
1927 */
1928 if (pinstance->ioa_hard_reset == 0) {
1929 if (ioread32(pinstance->ioa_status) &
1930 INTRS_TRANSITION_TO_OPERATIONAL) {
1931 pmcraid_info("sticky bit set, bring-up\n");
1932 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
1933 pmcraid_reinit_cmdblk(cmd);
1934 pmcraid_identify_hrrq(cmd);
1935 } else {
1936 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
1937 pmcraid_soft_reset(cmd);
1938 }
1939 } else {
1940 /* Alert IOA of a possible reset and wait for critical
1941 * operation in progress bit to reset
1942 */
1943 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1944 pmcraid_reset_alert(cmd);
1945 }
1946 break;
1947
1948 case IOA_STATE_IN_RESET_ALERT:
1949 /* If critical operation in progress bit is reset or wait gets
1950 * timed out, reset proceeds with starting BIST on the IOA.
1951 * pmcraid_ioa_hard_reset keeps a count of reset attempts. If
1952 * they are 3 or more, reset engine marks IOA dead and returns
1953 */
1954 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
1955 pmcraid_start_bist(cmd);
1956 break;
1957
1958 case IOA_STATE_IN_HARD_RESET:
1959 pinstance->ioa_reset_attempts++;
1960
1961 /* retry reset if we haven't reached maximum allowed limit */
1962 if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
1963 pinstance->ioa_reset_attempts = 0;
1964 pmcraid_err("IOA didn't respond marking it as dead\n");
1965 pinstance->ioa_state = IOA_STATE_DEAD;
1966 reset_complete = 1;
1967 break;
1968 }
1969
1970 /* Once either bist or pci reset is done, restore PCI config
1971 * space. If this fails, proceed with hard reset again
1972 */
1973
1974 if (pci_restore_state(pinstance->pdev)) {
1975 pmcraid_info("config-space error resetting again\n");
1976 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1977 pmcraid_reset_alert(cmd);
1978 break;
1979 }
1980
1981 /* fail all pending commands */
1982 pmcraid_fail_outstanding_cmds(pinstance);
1983
1984 /* check if unit check is active, if so extract dump */
1985 if (pinstance->ioa_unit_check) {
1986 pmcraid_info("unit check is active\n");
1987 pinstance->ioa_unit_check = 0;
1988 pmcraid_get_dump(pinstance);
1989 pinstance->ioa_reset_attempts--;
1990 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1991 pmcraid_reset_alert(cmd);
1992 break;
1993 }
1994
1995 /* if the reset reason is to bring-down the ioa, we might be
1996 * done with the reset restore pci_config_space and complete
1997 * the reset
1998 */
1999 if (pinstance->ioa_bringdown) {
2000 pmcraid_info("bringing down the adapter\n");
2001 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2002 pinstance->ioa_bringdown = 0;
2003 pinstance->ioa_state = IOA_STATE_UNKNOWN;
2004 reset_complete = 1;
2005 } else {
2006 /* bring-up IOA, so proceed with soft reset
2007 * Reinitialize hrrq_buffers and their indices also
2008 * enable interrupts after a pci_restore_state
2009 */
2010 if (pmcraid_reset_enable_ioa(pinstance)) {
2011 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2012 pmcraid_info("bringing up the adapter\n");
2013 pmcraid_reinit_cmdblk(cmd);
2014 pmcraid_identify_hrrq(cmd);
2015 } else {
2016 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
2017 pmcraid_soft_reset(cmd);
2018 }
2019 }
2020 break;
2021
2022 case IOA_STATE_IN_SOFT_RESET:
2023 /* TRANSITION TO OPERATIONAL is on so start initialization
2024 * sequence
2025 */
2026 pmcraid_info("In softreset proceeding with bring-up\n");
2027 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2028
2029 /* Initialization commands start with HRRQ identification. From
2030 * now on tasklet completes most of the commands as IOA is up
2031 * and intrs are enabled
2032 */
2033 pmcraid_identify_hrrq(cmd);
2034 break;
2035
2036 case IOA_STATE_IN_BRINGUP:
2037 /* we are done with bringing up of IOA, change the ioa_state to
2038 * operational and wake up any waiters
2039 */
2040 pinstance->ioa_state = IOA_STATE_OPERATIONAL;
2041 reset_complete = 1;
2042 break;
2043
2044 case IOA_STATE_OPERATIONAL:
2045 default:
2046 /* When IOA is operational and a reset is requested, check for
2047 * the reset reason. If reset is to bring down IOA, unregister
2048 * HCAMs and initiate shutdown; if adapter reset is forced then
2049 * restart reset sequence again
2050 */
2051 if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
2052 pinstance->force_ioa_reset == 0) {
2053 reset_complete = 1;
2054 } else {
2055 if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
2056 pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
2057 pmcraid_reinit_cmdblk(cmd);
2058 pmcraid_unregister_hcams(cmd);
2059 }
2060 break;
2061 }
2062
2063 /* reset will be completed if ioa_state is either DEAD or UNKNOWN or
2064 * OPERATIONAL. Reset all control variables used during reset, wake up
2065 * any waiting threads and let the SCSI mid-layer send commands. Note
2066 * that host_lock must be held before invoking scsi_report_bus_reset.
2067 */
2068 if (reset_complete) {
2069 pinstance->ioa_reset_in_progress = 0;
2070 pinstance->ioa_reset_attempts = 0;
2071 pinstance->reset_cmd = NULL;
2072 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2073 pinstance->ioa_bringdown = 0;
2074 pmcraid_return_cmd(cmd);
2075
2076 /* If target state is to bring up the adapter, proceed with
2077 * hcam registration and resource exposure to mid-layer.
2078 */
2079 if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
2080 pmcraid_register_hcams(pinstance);
2081
2082 wake_up_all(&pinstance->reset_wait_q);
2083 }
2084
2085 return;
2086}
2087
2088/**
2089 * pmcraid_initiate_reset - initiates reset sequence. This is called from
2090 * ISR/tasklet during error interrupts including IOA unit check. If reset
2091 * is already in progress, it just returns, otherwise initiates IOA reset
2092 * to bring IOA up to operational state.
2093 *
2094 * @pinstance: pointer to adapter instance structure
2095 *
2096 * Return value
2097 * none
2098 */
2099static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
2100{
2101 struct pmcraid_cmd *cmd;
2102
2103 /* If the reset is already in progress, just return, otherwise start
2104 * reset sequence and return
2105 */
2106 if (!pinstance->ioa_reset_in_progress) {
2107 scsi_block_requests(pinstance->host);
2108 cmd = pmcraid_get_free_cmd(pinstance);
2109
2110 if (cmd == NULL) {
2111 pmcraid_err("no cmnd blocks for initiate_reset\n");
2112 return;
2113 }
2114
2115 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2116 pinstance->reset_cmd = cmd;
2117 pinstance->force_ioa_reset = 1;
2118 pmcraid_ioa_reset(cmd);
2119 }
2120}
2121
2122/**
2123 * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
2124 * or bringdown IOA
2125 * @pinstance: pointer adapter instance structure
2126 * @shutdown_type: shutdown type to be used NONE, NORMAL or ABRREV
2127 * @target_state: expected target state after reset
2128 *
2129 * Note: This command initiates reset and waits for its completion. Hence this
2130 * should not be called from isr/timer/tasklet functions (timeout handlers,
2131 * error response handlers and interrupt handlers).
2132 *
2133 * Return Value
2134 * 1 in case ioa_state is not target_state, 0 otherwise.
2135 */
2136static int pmcraid_reset_reload(
2137 struct pmcraid_instance *pinstance,
2138 u8 shutdown_type,
2139 u8 target_state
2140)
2141{
2142 struct pmcraid_cmd *reset_cmd = NULL;
2143 unsigned long lock_flags;
2144 int reset = 1;
2145
2146 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2147
2148 if (pinstance->ioa_reset_in_progress) {
2149 pmcraid_info("reset_reload: reset is already in progress\n");
2150
2151 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2152
2153 wait_event(pinstance->reset_wait_q,
2154 !pinstance->ioa_reset_in_progress);
2155
2156 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2157
2158 if (pinstance->ioa_state == IOA_STATE_DEAD) {
2159 spin_unlock_irqrestore(pinstance->host->host_lock,
2160 lock_flags);
2161 pmcraid_info("reset_reload: IOA is dead\n");
2162 return reset;
2163 } else if (pinstance->ioa_state == target_state) {
2164 reset = 0;
2165 }
2166 }
2167
2168 if (reset) {
2169 pmcraid_info("reset_reload: proceeding with reset\n");
2170 scsi_block_requests(pinstance->host);
2171 reset_cmd = pmcraid_get_free_cmd(pinstance);
2172
2173 if (reset_cmd == NULL) {
2174 pmcraid_err("no free cmnd for reset_reload\n");
2175 spin_unlock_irqrestore(pinstance->host->host_lock,
2176 lock_flags);
2177 return reset;
2178 }
2179
2180 if (shutdown_type == SHUTDOWN_NORMAL)
2181 pinstance->ioa_bringdown = 1;
2182
2183 pinstance->ioa_shutdown_type = shutdown_type;
2184 pinstance->reset_cmd = reset_cmd;
2185 pinstance->force_ioa_reset = reset;
2186 pmcraid_info("reset_reload: initiating reset\n");
2187 pmcraid_ioa_reset(reset_cmd);
2188 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2189 pmcraid_info("reset_reload: waiting for reset to complete\n");
2190 wait_event(pinstance->reset_wait_q,
2191 !pinstance->ioa_reset_in_progress);
2192
2193 pmcraid_info("reset_reload: reset is complete !! \n");
2194 scsi_unblock_requests(pinstance->host);
2195 if (pinstance->ioa_state == target_state)
2196 reset = 0;
2197 }
2198
2199 return reset;
2200}
2201
2202/**
2203 * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
2204 *
2205 * @pinstance: pointer to adapter instance structure
2206 *
2207 * Return Value
2208 * whatever is returned from pmcraid_reset_reload
2209 */
2210static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
2211{
2212 return pmcraid_reset_reload(pinstance,
2213 SHUTDOWN_NORMAL,
2214 IOA_STATE_UNKNOWN);
2215}
2216
2217/**
2218 * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
2219 *
2220 * @pinstance: pointer to adapter instance structure
2221 *
2222 * Return Value
2223 * whatever is returned from pmcraid_reset_reload
2224 */
2225static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
2226{
2227 return pmcraid_reset_reload(pinstance,
2228 SHUTDOWN_NONE,
2229 IOA_STATE_OPERATIONAL);
2230}
2231
2232/**
2233 * pmcraid_request_sense - Send request sense to a device
2234 * @cmd: pmcraid command struct
2235 *
2236 * This function sends a request sense to a device as a result of a check
2237 * condition. This method re-uses the same command block that failed earlier.
2238 */
2239static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2240{
2241 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2242 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2243
2244 /* allocate DMAable memory for sense buffers */
2245 cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
2246 SCSI_SENSE_BUFFERSIZE,
2247 &cmd->sense_buffer_dma);
2248
2249 if (cmd->sense_buffer == NULL) {
2250 pmcraid_err
2251 ("couldn't allocate sense buffer for request sense\n");
2252 pmcraid_erp_done(cmd);
2253 return;
2254 }
2255
2256 /* re-use the command block */
2257 memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
2258 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2259 ioarcb->request_flags0 = (SYNC_COMPLETE |
2260 NO_LINK_DESCS |
2261 INHIBIT_UL_CHECK);
2262 ioarcb->request_type = REQ_TYPE_SCSI;
2263 ioarcb->cdb[0] = REQUEST_SENSE;
2264 ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2265
2266 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
2267 offsetof(struct pmcraid_ioarcb,
2268 add_data.u.ioadl[0]));
2269 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
2270
2271 ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2272
2273 ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
2274 ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
88197966 2275 ioadl->flags = IOADL_FLAGS_LAST_DESC;
89a36810
AR
2276
2277 /* request sense might be called as part of error response processing
2278 * which runs in tasklets context. It is possible that mid-layer might
2279 * schedule queuecommand during this time, hence, writting to IOARRIN
2280 * must be protect by host_lock
2281 */
2282 pmcraid_send_cmd(cmd, pmcraid_erp_done,
2283 PMCRAID_REQUEST_SENSE_TIMEOUT,
2284 pmcraid_timeout_handler);
2285}
2286
2287/**
2288 * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
2289 * @cmd: command that failed
2290 * @sense: true if request_sense is required after cancel all
2291 *
2292 * This function sends a cancel all to a device to clear the queue.
2293 */
2294static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
2295{
2296 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2297 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2298 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2299 void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
2300 : pmcraid_request_sense;
2301
2302 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2303 ioarcb->request_flags0 = SYNC_OVERRIDE;
2304 ioarcb->request_type = REQ_TYPE_IOACMD;
2305 ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
2306
2307 if (RES_IS_GSCSI(res->cfg_entry))
2308 ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
2309
2310 ioarcb->ioadl_bus_addr = 0;
2311 ioarcb->ioadl_length = 0;
2312 ioarcb->data_transfer_length = 0;
2313 ioarcb->ioarcb_bus_addr &= (~0x1FULL);
2314
2315 /* writing to IOARRIN must be protected by host_lock, as mid-layer
2316 * schedule queuecommand while we are doing this
2317 */
2318 pmcraid_send_cmd(cmd, cmd_done,
2319 PMCRAID_REQUEST_SENSE_TIMEOUT,
2320 pmcraid_timeout_handler);
2321}
2322
2323/**
2324 * pmcraid_frame_auto_sense: frame fixed format sense information
2325 *
2326 * @cmd: pointer to failing command block
2327 *
2328 * Return value
2329 * none
2330 */
2331static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
2332{
2333 u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
2334 struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
2335 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2336 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2337 u32 failing_lba = 0;
2338
2339 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
2340 cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
2341
2342 if (RES_IS_VSET(res->cfg_entry) &&
2343 ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
2344 ioasa->u.vset.failing_lba_hi != 0) {
2345
2346 sense_buf[0] = 0x72;
2347 sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2348 sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2349 sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2350
2351 sense_buf[7] = 12;
2352 sense_buf[8] = 0;
2353 sense_buf[9] = 0x0A;
2354 sense_buf[10] = 0x80;
2355
2356 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
2357
2358 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
2359 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
2360 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
2361 sense_buf[15] = failing_lba & 0x000000ff;
2362
2363 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
2364
2365 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
2366 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
2367 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
2368 sense_buf[19] = failing_lba & 0x000000ff;
2369 } else {
2370 sense_buf[0] = 0x70;
2371 sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2372 sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2373 sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2374
2375 if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
2376 if (RES_IS_VSET(res->cfg_entry))
2377 failing_lba =
2378 le32_to_cpu(ioasa->u.
2379 vset.failing_lba_lo);
2380 sense_buf[0] |= 0x80;
2381 sense_buf[3] = (failing_lba >> 24) & 0xff;
2382 sense_buf[4] = (failing_lba >> 16) & 0xff;
2383 sense_buf[5] = (failing_lba >> 8) & 0xff;
2384 sense_buf[6] = failing_lba & 0xff;
2385 }
2386
2387 sense_buf[7] = 6; /* additional length */
2388 }
2389}
2390
2391/**
2392 * pmcraid_error_handler - Error response handlers for a SCSI op
2393 * @cmd: pointer to pmcraid_cmd that has failed
2394 *
2395 * This function determines whether or not to initiate ERP on the affected
2396 * device. This is called from a tasklet, which doesn't hold any locks.
2397 *
2398 * Return value:
2399 * 0 it caller can complete the request, otherwise 1 where in error
2400 * handler itself completes the request and returns the command block
2401 * back to free-pool
2402 */
2403static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2404{
2405 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2406 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2407 struct pmcraid_instance *pinstance = cmd->drv_inst;
2408 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2409 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2410 u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
2411 u32 sense_copied = 0;
2412
2413 if (!res) {
2414 pmcraid_info("resource pointer is NULL\n");
2415 return 0;
2416 }
2417
2418 /* If this was a SCSI read/write command keep count of errors */
2419 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
2420 atomic_inc(&res->read_failures);
2421 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
2422 atomic_inc(&res->write_failures);
2423
2424 if (!RES_IS_GSCSI(res->cfg_entry) &&
2425 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
2426 pmcraid_frame_auto_sense(cmd);
2427 }
2428
2429 /* Log IOASC/IOASA information based on user settings */
2430 pmcraid_ioasc_logger(ioasc, cmd);
2431
2432 switch (masked_ioasc) {
2433
2434 case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
2435 scsi_cmd->result |= (DID_ABORT << 16);
2436 break;
2437
2438 case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
2439 case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
2440 scsi_cmd->result |= (DID_NO_CONNECT << 16);
2441 break;
2442
2443 case PMCRAID_IOASC_NR_SYNC_REQUIRED:
2444 res->sync_reqd = 1;
2445 scsi_cmd->result |= (DID_IMM_RETRY << 16);
2446 break;
2447
2448 case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
2449 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
2450 break;
2451
2452 case PMCRAID_IOASC_UA_BUS_WAS_RESET:
2453 case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
2454 if (!res->reset_progress)
2455 scsi_report_bus_reset(pinstance->host,
2456 scsi_cmd->device->channel);
2457 scsi_cmd->result |= (DID_ERROR << 16);
2458 break;
2459
2460 case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
2461 scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
2462 res->sync_reqd = 1;
2463
2464 /* if check_condition is not active return with error otherwise
2465 * get/frame the sense buffer
2466 */
2467 if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
2468 SAM_STAT_CHECK_CONDITION &&
2469 PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
2470 return 0;
2471
2472 /* If we have auto sense data as part of IOASA pass it to
2473 * mid-layer
2474 */
2475 if (ioasa->auto_sense_length != 0) {
2476 short sense_len = ioasa->auto_sense_length;
2477 int data_size = min_t(u16, le16_to_cpu(sense_len),
2478 SCSI_SENSE_BUFFERSIZE);
2479
2480 memcpy(scsi_cmd->sense_buffer,
2481 ioasa->sense_data,
2482 data_size);
2483 sense_copied = 1;
2484 }
2485
2486 if (RES_IS_GSCSI(res->cfg_entry)) {
2487 pmcraid_cancel_all(cmd, sense_copied);
2488 } else if (sense_copied) {
2489 pmcraid_erp_done(cmd);
2490 return 0;
2491 } else {
2492 pmcraid_request_sense(cmd);
2493 }
2494
2495 return 1;
2496
2497 case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
2498 break;
2499
2500 default:
2501 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
2502 scsi_cmd->result |= (DID_ERROR << 16);
2503 break;
2504 }
2505 return 0;
2506}
2507
2508/**
2509 * pmcraid_reset_device - device reset handler functions
2510 *
2511 * @scsi_cmd: scsi command struct
2512 * @modifier: reset modifier indicating the reset sequence to be performed
2513 *
2514 * This function issues a device reset to the affected device.
2515 * A LUN reset will be sent to the device first. If that does
2516 * not work, a target reset will be sent.
2517 *
2518 * Return value:
2519 * SUCCESS / FAILED
2520 */
2521static int pmcraid_reset_device(
2522 struct scsi_cmnd *scsi_cmd,
2523 unsigned long timeout,
2524 u8 modifier
2525)
2526{
2527 struct pmcraid_cmd *cmd;
2528 struct pmcraid_instance *pinstance;
2529 struct pmcraid_resource_entry *res;
2530 struct pmcraid_ioarcb *ioarcb;
2531 unsigned long lock_flags;
2532 u32 ioasc;
2533
2534 pinstance =
2535 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2536 res = scsi_cmd->device->hostdata;
2537
2538 if (!res) {
34876402
AR
2539 sdev_printk(KERN_ERR, scsi_cmd->device,
2540 "reset_device: NULL resource pointer\n");
89a36810
AR
2541 return FAILED;
2542 }
2543
2544 /* If adapter is currently going through reset/reload, return failed.
2545 * This will force the mid-layer to call _eh_bus/host reset, which
2546 * will then go to sleep and wait for the reset to complete
2547 */
2548 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2549 if (pinstance->ioa_reset_in_progress ||
2550 pinstance->ioa_state == IOA_STATE_DEAD) {
2551 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2552 return FAILED;
2553 }
2554
2555 res->reset_progress = 1;
2556 pmcraid_info("Resetting %s resource with addr %x\n",
2557 ((modifier & RESET_DEVICE_LUN) ? "LUN" :
2558 ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
2559 le32_to_cpu(res->cfg_entry.resource_address));
2560
2561 /* get a free cmd block */
2562 cmd = pmcraid_get_free_cmd(pinstance);
2563
2564 if (cmd == NULL) {
2565 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2566 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2567 return FAILED;
2568 }
2569
2570 ioarcb = &cmd->ioa_cb->ioarcb;
2571 ioarcb->resource_handle = res->cfg_entry.resource_handle;
2572 ioarcb->request_type = REQ_TYPE_IOACMD;
2573 ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
2574
2575 /* Initialize reset modifier bits */
2576 if (modifier)
2577 modifier = ENABLE_RESET_MODIFIER | modifier;
2578
2579 ioarcb->cdb[1] = modifier;
2580
2581 init_completion(&cmd->wait_for_completion);
2582 cmd->completion_req = 1;
2583
2584 pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
2585 cmd->ioa_cb->ioarcb.cdb[0],
2586 le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
2587 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2588
2589 pmcraid_send_cmd(cmd,
2590 pmcraid_internal_done,
2591 timeout,
2592 pmcraid_timeout_handler);
2593
2594 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2595
2596 /* RESET_DEVICE command completes after all pending IOARCBs are
2597 * completed. Once this command is completed, pmcraind_internal_done
2598 * will wake up the 'completion' queue.
2599 */
2600 wait_for_completion(&cmd->wait_for_completion);
2601
2602 /* complete the command here itself and return the command block
2603 * to free list
2604 */
2605 pmcraid_return_cmd(cmd);
2606 res->reset_progress = 0;
2607 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2608
2609 /* set the return value based on the returned ioasc */
2610 return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2611}
2612
2613/**
2614 * _pmcraid_io_done - helper for pmcraid_io_done function
2615 *
2616 * @cmd: pointer to pmcraid command struct
2617 * @reslen: residual data length to be set in the ioasa
2618 * @ioasc: ioasc either returned by IOA or set by driver itself.
2619 *
2620 * This function is invoked by pmcraid_io_done to complete mid-layer
2621 * scsi ops.
2622 *
2623 * Return value:
2624 * 0 if caller is required to return it to free_pool. Returns 1 if
2625 * caller need not worry about freeing command block as error handler
2626 * will take care of that.
2627 */
2628
2629static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
2630{
2631 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2632 int rc = 0;
2633
2634 scsi_set_resid(scsi_cmd, reslen);
2635
2636 pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
2637 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
2638 cmd->ioa_cb->ioarcb.cdb[0],
2639 ioasc, scsi_cmd->result);
2640
2641 if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
2642 rc = pmcraid_error_handler(cmd);
2643
2644 if (rc == 0) {
2645 scsi_dma_unmap(scsi_cmd);
2646 scsi_cmd->scsi_done(scsi_cmd);
2647 }
2648
2649 return rc;
2650}
2651
2652/**
2653 * pmcraid_io_done - SCSI completion function
2654 *
2655 * @cmd: pointer to pmcraid command struct
2656 *
2657 * This function is invoked by tasklet/mid-layer error handler to completing
2658 * the SCSI ops sent from mid-layer.
2659 *
2660 * Return value
2661 * none
2662 */
2663
2664static void pmcraid_io_done(struct pmcraid_cmd *cmd)
2665{
2666 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2667 u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
2668
2669 if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
2670 pmcraid_return_cmd(cmd);
2671}
2672
2673/**
2674 * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
2675 *
2676 * @cmd: command block of the command to be aborted
2677 *
2678 * Return Value:
2679 * returns pointer to command structure used as cancelling cmd
2680 */
2681static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
2682{
2683 struct pmcraid_cmd *cancel_cmd;
2684 struct pmcraid_instance *pinstance;
2685 struct pmcraid_resource_entry *res;
2686
2687 pinstance = (struct pmcraid_instance *)cmd->drv_inst;
2688 res = cmd->scsi_cmd->device->hostdata;
2689
2690 cancel_cmd = pmcraid_get_free_cmd(pinstance);
2691
2692 if (cancel_cmd == NULL) {
2693 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2694 return NULL;
2695 }
2696
2697 pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
2698
2699 pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
2700 cmd->ioa_cb->ioarcb.cdb[0],
2701 cmd->ioa_cb->ioarcb.response_handle >> 2);
2702
2703 init_completion(&cancel_cmd->wait_for_completion);
2704 cancel_cmd->completion_req = 1;
2705
2706 pmcraid_info("command (%d) CDB[0] = %x for %x\n",
2707 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
2708 cmd->ioa_cb->ioarcb.cdb[0],
2709 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
2710
2711 pmcraid_send_cmd(cancel_cmd,
2712 pmcraid_internal_done,
2713 PMCRAID_INTERNAL_TIMEOUT,
2714 pmcraid_timeout_handler);
2715 return cancel_cmd;
2716}
2717
2718/**
2719 * pmcraid_abort_complete - Waits for ABORT TASK completion
2720 *
2721 * @cancel_cmd: command block use as cancelling command
2722 *
2723 * Return Value:
2724 * returns SUCCESS if ABORT TASK has good completion
2725 * otherwise FAILED
2726 */
2727static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
2728{
2729 struct pmcraid_resource_entry *res;
2730 u32 ioasc;
2731
2732 wait_for_completion(&cancel_cmd->wait_for_completion);
2733 res = cancel_cmd->u.res;
2734 cancel_cmd->u.res = NULL;
2735 ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
2736
2737 /* If the abort task is not timed out we will get a Good completion
2738 * as sense_key, otherwise we may get one the following responses
2739 * due to subsquent bus reset or device reset. In case IOASC is
2740 * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource
2741 */
2742 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
2743 ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
2744 if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
2745 res->sync_reqd = 1;
2746 ioasc = 0;
2747 }
2748
2749 /* complete the command here itself */
2750 pmcraid_return_cmd(cancel_cmd);
2751 return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2752}
2753
2754/**
2755 * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
2756 *
2757 * @scsi_cmd: scsi command struct given by mid-layer. When this is called
2758 * mid-layer ensures that no other commands are queued. This
2759 * never gets called under interrupt, but a separate eh thread.
2760 *
2761 * Return value:
2762 * SUCCESS / FAILED
2763 */
2764static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2765{
2766 struct pmcraid_instance *pinstance;
2767 struct pmcraid_cmd *cmd;
2768 struct pmcraid_resource_entry *res;
2769 unsigned long host_lock_flags;
2770 unsigned long pending_lock_flags;
2771 struct pmcraid_cmd *cancel_cmd = NULL;
2772 int cmd_found = 0;
2773 int rc = FAILED;
2774
2775 pinstance =
2776 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2777
34876402
AR
2778 scmd_printk(KERN_INFO, scsi_cmd,
2779 "I/O command timed out, aborting it.\n");
89a36810
AR
2780
2781 res = scsi_cmd->device->hostdata;
2782
2783 if (res == NULL)
2784 return rc;
2785
2786 /* If we are currently going through reset/reload, return failed.
2787 * This will force the mid-layer to eventually call
2788 * pmcraid_eh_host_reset which will then go to sleep and wait for the
2789 * reset to complete
2790 */
2791 spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
2792
2793 if (pinstance->ioa_reset_in_progress ||
2794 pinstance->ioa_state == IOA_STATE_DEAD) {
2795 spin_unlock_irqrestore(pinstance->host->host_lock,
2796 host_lock_flags);
2797 return rc;
2798 }
2799
2800 /* loop over pending cmd list to find cmd corresponding to this
2801 * scsi_cmd. Note that this command might not have been completed
2802 * already. locking: all pending commands are protected with
2803 * pending_pool_lock.
2804 */
2805 spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
2806 list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
2807
2808 if (cmd->scsi_cmd == scsi_cmd) {
2809 cmd_found = 1;
2810 break;
2811 }
2812 }
2813
2814 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
2815 pending_lock_flags);
2816
2817 /* If the command to be aborted was given to IOA and still pending with
2818 * it, send ABORT_TASK to abort this and wait for its completion
2819 */
2820 if (cmd_found)
2821 cancel_cmd = pmcraid_abort_cmd(cmd);
2822
2823 spin_unlock_irqrestore(pinstance->host->host_lock,
2824 host_lock_flags);
2825
2826 if (cancel_cmd) {
2827 cancel_cmd->u.res = cmd->scsi_cmd->device->hostdata;
2828 rc = pmcraid_abort_complete(cancel_cmd);
2829 }
2830
2831 return cmd_found ? rc : SUCCESS;
2832}
2833
2834/**
2835 * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
2836 *
2837 * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
2838 *
2839 * All these routines invokve pmcraid_reset_device with appropriate parameters.
2840 * Since these are called from mid-layer EH thread, no other IO will be queued
2841 * to the resource being reset. However, control path (IOCTL) may be active so
2842 * it is necessary to synchronize IOARRIN writes which pmcraid_reset_device
2843 * takes care by locking/unlocking host_lock.
2844 *
2845 * Return value
2846 * SUCCESS or FAILED
2847 */
2848static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
2849{
34876402
AR
2850 scmd_printk(KERN_INFO, scmd,
2851 "resetting device due to an I/O command timeout.\n");
89a36810
AR
2852 return pmcraid_reset_device(scmd,
2853 PMCRAID_INTERNAL_TIMEOUT,
2854 RESET_DEVICE_LUN);
2855}
2856
2857static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
2858{
34876402
AR
2859 scmd_printk(KERN_INFO, scmd,
2860 "Doing bus reset due to an I/O command timeout.\n");
89a36810
AR
2861 return pmcraid_reset_device(scmd,
2862 PMCRAID_RESET_BUS_TIMEOUT,
2863 RESET_DEVICE_BUS);
2864}
2865
2866static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
2867{
34876402
AR
2868 scmd_printk(KERN_INFO, scmd,
2869 "Doing target reset due to an I/O command timeout.\n");
89a36810
AR
2870 return pmcraid_reset_device(scmd,
2871 PMCRAID_INTERNAL_TIMEOUT,
2872 RESET_DEVICE_TARGET);
2873}
2874
2875/**
2876 * pmcraid_eh_host_reset_handler - adapter reset handler callback
2877 *
2878 * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
2879 *
2880 * Initiates adapter reset to bring it up to operational state
2881 *
2882 * Return value
2883 * SUCCESS or FAILED
2884 */
2885static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
2886{
2887 unsigned long interval = 10000; /* 10 seconds interval */
2888 int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
2889 struct pmcraid_instance *pinstance =
2890 (struct pmcraid_instance *)(scmd->device->host->hostdata);
2891
2892
2893 /* wait for an additional 150 seconds just in case firmware could come
2894 * up and if it could complete all the pending commands excluding the
2895 * two HCAM (CCN and LDN).
2896 */
2897 while (waits--) {
2898 if (atomic_read(&pinstance->outstanding_cmds) <=
2899 PMCRAID_MAX_HCAM_CMD)
2900 return SUCCESS;
2901 msleep(interval);
2902 }
2903
2904 dev_err(&pinstance->pdev->dev,
2905 "Adapter being reset due to an I/O command timeout.\n");
2906 return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
2907}
2908
2909/**
2910 * pmcraid_task_attributes - Translate SPI Q-Tags to task attributes
2911 * @scsi_cmd: scsi command struct
2912 *
2913 * Return value
2914 * number of tags or 0 if the task is not tagged
2915 */
2916static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd)
2917{
2918 char tag[2];
2919 u8 rc = 0;
2920
2921 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
2922 switch (tag[0]) {
2923 case MSG_SIMPLE_TAG:
2924 rc = TASK_TAG_SIMPLE;
2925 break;
2926 case MSG_HEAD_TAG:
2927 rc = TASK_TAG_QUEUE_HEAD;
2928 break;
2929 case MSG_ORDERED_TAG:
2930 rc = TASK_TAG_ORDERED;
2931 break;
2932 };
2933 }
2934
2935 return rc;
2936}
2937
2938
2939/**
2940 * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
2941 * @cmd: pmcraid command struct
2942 * @sgcount: count of scatter-gather elements
2943 *
2944 * Return value
2945 * returns pointer pmcraid_ioadl_desc, initialized to point to internal
2946 * or external IOADLs
2947 */
2948struct pmcraid_ioadl_desc *
2949pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
2950{
2951 struct pmcraid_ioadl_desc *ioadl;
2952 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2953 int ioadl_count = 0;
2954
2955 if (ioarcb->add_cmd_param_length)
2956 ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
2957 ioarcb->ioadl_length =
2958 sizeof(struct pmcraid_ioadl_desc) * sgcount;
2959
2960 if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
2961 /* external ioadls start at offset 0x80 from control_block
2962 * structure, re-using 24 out of 27 ioadls part of IOARCB.
2963 * It is necessary to indicate to firmware that driver is
2964 * using ioadls to be treated as external to IOARCB.
2965 */
2966 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
2967 ioarcb->ioadl_bus_addr =
2968 cpu_to_le64((cmd->ioa_cb_bus_addr) +
2969 offsetof(struct pmcraid_ioarcb,
2970 add_data.u.ioadl[3]));
2971 ioadl = &ioarcb->add_data.u.ioadl[3];
2972 } else {
2973 ioarcb->ioadl_bus_addr =
2974 cpu_to_le64((cmd->ioa_cb_bus_addr) +
2975 offsetof(struct pmcraid_ioarcb,
2976 add_data.u.ioadl[ioadl_count]));
2977
2978 ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
2979 ioarcb->ioarcb_bus_addr |=
2980 DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
2981 }
2982
2983 return ioadl;
2984}
2985
2986/**
2987 * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
2988 * @pinstance: pointer to adapter instance structure
2989 * @cmd: pmcraid command struct
2990 *
2991 * This function is invoked by queuecommand entry point while sending a command
2992 * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
2993 *
2994 * Return value:
2995 * 0 on success or -1 on failure
2996 */
2997static int pmcraid_build_ioadl(
2998 struct pmcraid_instance *pinstance,
2999 struct pmcraid_cmd *cmd
3000)
3001{
3002 int i, nseg;
3003 struct scatterlist *sglist;
3004
3005 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
3006 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
3007 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
3008
3009 u32 length = scsi_bufflen(scsi_cmd);
3010
3011 if (!length)
3012 return 0;
3013
3014 nseg = scsi_dma_map(scsi_cmd);
3015
3016 if (nseg < 0) {
34876402 3017 scmd_printk(KERN_ERR, scsi_cmd, "scsi_map_dma failed!\n");
89a36810
AR
3018 return -1;
3019 } else if (nseg > PMCRAID_MAX_IOADLS) {
3020 scsi_dma_unmap(scsi_cmd);
34876402 3021 scmd_printk(KERN_ERR, scsi_cmd,
89a36810
AR
3022 "sg count is (%d) more than allowed!\n", nseg);
3023 return -1;
3024 }
3025
3026 /* Initialize IOARCB data transfer length fields */
3027 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
3028 ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
3029
3030 ioarcb->request_flags0 |= NO_LINK_DESCS;
3031 ioarcb->data_transfer_length = cpu_to_le32(length);
3032 ioadl = pmcraid_init_ioadls(cmd, nseg);
3033
3034 /* Initialize IOADL descriptor addresses */
3035 scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
3036 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
3037 ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
3038 ioadl[i].flags = 0;
3039 }
3040 /* setup last descriptor */
88197966 3041 ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
89a36810
AR
3042
3043 return 0;
3044}
3045
3046/**
3047 * pmcraid_free_sglist - Frees an allocated SG buffer list
3048 * @sglist: scatter/gather list pointer
3049 *
3050 * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
3051 *
3052 * Return value:
3053 * none
3054 */
3055static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
3056{
3057 int i;
3058
3059 for (i = 0; i < sglist->num_sg; i++)
3060 __free_pages(sg_page(&(sglist->scatterlist[i])),
3061 sglist->order);
3062
3063 kfree(sglist);
3064}
3065
3066/**
3067 * pmcraid_alloc_sglist - Allocates memory for a SG list
3068 * @buflen: buffer length
3069 *
3070 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3071 * list.
3072 *
3073 * Return value
3074 * pointer to sglist / NULL on failure
3075 */
3076static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
3077{
3078 struct pmcraid_sglist *sglist;
3079 struct scatterlist *scatterlist;
3080 struct page *page;
3081 int num_elem, i, j;
3082 int sg_size;
3083 int order;
3084 int bsize_elem;
3085
3086 sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
3087 order = (sg_size > 0) ? get_order(sg_size) : 0;
3088 bsize_elem = PAGE_SIZE * (1 << order);
3089
3090 /* Determine the actual number of sg entries needed */
3091 if (buflen % bsize_elem)
3092 num_elem = (buflen / bsize_elem) + 1;
3093 else
3094 num_elem = buflen / bsize_elem;
3095
3096 /* Allocate a scatter/gather list for the DMA */
3097 sglist = kzalloc(sizeof(struct pmcraid_sglist) +
3098 (sizeof(struct scatterlist) * (num_elem - 1)),
3099 GFP_KERNEL);
3100
3101 if (sglist == NULL)
3102 return NULL;
3103
3104 scatterlist = sglist->scatterlist;
3105 sg_init_table(scatterlist, num_elem);
3106 sglist->order = order;
3107 sglist->num_sg = num_elem;
3108 sg_size = buflen;
3109
3110 for (i = 0; i < num_elem; i++) {
3111 page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
3112 if (!page) {
3113 for (j = i - 1; j >= 0; j--)
3114 __free_pages(sg_page(&scatterlist[j]), order);
3115 kfree(sglist);
3116 return NULL;
3117 }
3118
3119 sg_set_page(&scatterlist[i], page,
3120 sg_size < bsize_elem ? sg_size : bsize_elem, 0);
3121 sg_size -= bsize_elem;
3122 }
3123
3124 return sglist;
3125}
3126
3127/**
3128 * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
3129 * @sglist: scatter/gather list pointer
3130 * @buffer: buffer pointer
3131 * @len: buffer length
3132 * @direction: data transfer direction
3133 *
3134 * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
3135 *
3136 * Return value:
3137 * 0 on success / other on failure
3138 */
3139static int pmcraid_copy_sglist(
3140 struct pmcraid_sglist *sglist,
3141 unsigned long buffer,
3142 u32 len,
3143 int direction
3144)
3145{
3146 struct scatterlist *scatterlist;
3147 void *kaddr;
3148 int bsize_elem;
3149 int i;
3150 int rc = 0;
3151
3152 /* Determine the actual number of bytes per element */
3153 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3154
3155 scatterlist = sglist->scatterlist;
3156
3157 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3158 struct page *page = sg_page(&scatterlist[i]);
3159
3160 kaddr = kmap(page);
3161 if (direction == DMA_TO_DEVICE)
3162 rc = __copy_from_user(kaddr,
3163 (void *)buffer,
3164 bsize_elem);
3165 else
3166 rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
3167
3168 kunmap(page);
3169
3170 if (rc) {
3171 pmcraid_err("failed to copy user data into sg list\n");
3172 return -EFAULT;
3173 }
3174
3175 scatterlist[i].length = bsize_elem;
3176 }
3177
3178 if (len % bsize_elem) {
3179 struct page *page = sg_page(&scatterlist[i]);
3180
3181 kaddr = kmap(page);
3182
3183 if (direction == DMA_TO_DEVICE)
3184 rc = __copy_from_user(kaddr,
3185 (void *)buffer,
3186 len % bsize_elem);
3187 else
3188 rc = __copy_to_user((void *)buffer,
3189 kaddr,
3190 len % bsize_elem);
3191
3192 kunmap(page);
3193
3194 scatterlist[i].length = len % bsize_elem;
3195 }
3196
3197 if (rc) {
3198 pmcraid_err("failed to copy user data into sg list\n");
3199 rc = -EFAULT;
3200 }
3201
3202 return rc;
3203}
3204
3205/**
3206 * pmcraid_queuecommand - Queue a mid-layer request
3207 * @scsi_cmd: scsi command struct
3208 * @done: done function
3209 *
3210 * This function queues a request generated by the mid-layer. Midlayer calls
3211 * this routine within host->lock. Some of the functions called by queuecommand
3212 * would use cmd block queue locks (free_pool_lock and pending_pool_lock)
3213 *
3214 * Return value:
3215 * 0 on success
3216 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3217 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3218 */
3219static int pmcraid_queuecommand(
3220 struct scsi_cmnd *scsi_cmd,
3221 void (*done) (struct scsi_cmnd *)
3222)
3223{
3224 struct pmcraid_instance *pinstance;
3225 struct pmcraid_resource_entry *res;
3226 struct pmcraid_ioarcb *ioarcb;
3227 struct pmcraid_cmd *cmd;
3228 int rc = 0;
3229
3230 pinstance =
3231 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
3232
3233 scsi_cmd->scsi_done = done;
3234 res = scsi_cmd->device->hostdata;
3235 scsi_cmd->result = (DID_OK << 16);
3236
3237 /* if adapter is marked as dead, set result to DID_NO_CONNECT complete
3238 * the command
3239 */
3240 if (pinstance->ioa_state == IOA_STATE_DEAD) {
3241 pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
3242 scsi_cmd->result = (DID_NO_CONNECT << 16);
3243 scsi_cmd->scsi_done(scsi_cmd);
3244 return 0;
3245 }
3246
3247 /* If IOA reset is in progress, can't queue the commands */
3248 if (pinstance->ioa_reset_in_progress)
3249 return SCSI_MLQUEUE_HOST_BUSY;
3250
3251 /* initialize the command and IOARCB to be sent to IOA */
3252 cmd = pmcraid_get_free_cmd(pinstance);
3253
3254 if (cmd == NULL) {
3255 pmcraid_err("free command block is not available\n");
3256 return SCSI_MLQUEUE_HOST_BUSY;
3257 }
3258
3259 cmd->scsi_cmd = scsi_cmd;
3260 ioarcb = &(cmd->ioa_cb->ioarcb);
3261 memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3262 ioarcb->resource_handle = res->cfg_entry.resource_handle;
3263 ioarcb->request_type = REQ_TYPE_SCSI;
3264
3265 cmd->cmd_done = pmcraid_io_done;
3266
3267 if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
3268 if (scsi_cmd->underflow == 0)
3269 ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
3270
3271 if (res->sync_reqd) {
3272 ioarcb->request_flags0 |= SYNC_COMPLETE;
3273 res->sync_reqd = 0;
3274 }
3275
3276 ioarcb->request_flags0 |= NO_LINK_DESCS;
3277 ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd);
3278
3279 if (RES_IS_GSCSI(res->cfg_entry))
3280 ioarcb->request_flags1 |= DELAY_AFTER_RESET;
3281 }
3282
3283 rc = pmcraid_build_ioadl(pinstance, cmd);
3284
3285 pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
3286 le32_to_cpu(ioarcb->response_handle) >> 2,
3287 scsi_cmd->cmnd[0], pinstance->host->unique_id,
3288 RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
3289 PMCRAID_PHYS_BUS_ID,
3290 RES_IS_VSET(res->cfg_entry) ?
3291 res->cfg_entry.unique_flags1 :
3292 RES_TARGET(res->cfg_entry.resource_address),
3293 RES_LUN(res->cfg_entry.resource_address));
3294
3295 if (likely(rc == 0)) {
3296 _pmcraid_fire_command(cmd);
3297 } else {
3298 pmcraid_err("queuecommand could not build ioadl\n");
3299 pmcraid_return_cmd(cmd);
3300 rc = SCSI_MLQUEUE_HOST_BUSY;
3301 }
3302
3303 return rc;
3304}
3305
3306/**
3307 * pmcraid_open -char node "open" entry, allowed only users with admin access
3308 */
3309static int pmcraid_chr_open(struct inode *inode, struct file *filep)
3310{
3311 struct pmcraid_instance *pinstance;
3312
3313 if (!capable(CAP_SYS_ADMIN))
3314 return -EACCES;
3315
3316 /* Populate adapter instance * pointer for use by ioctl */
3317 pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
3318 filep->private_data = pinstance;
3319
3320 return 0;
3321}
3322
3323/**
3324 * pmcraid_release - char node "release" entry point
3325 */
3326static int pmcraid_chr_release(struct inode *inode, struct file *filep)
3327{
3328 struct pmcraid_instance *pinstance =
3329 ((struct pmcraid_instance *)filep->private_data);
3330
3331 filep->private_data = NULL;
3332 fasync_helper(-1, filep, 0, &pinstance->aen_queue);
3333
3334 return 0;
3335}
3336
3337/**
3338 * pmcraid_fasync - Async notifier registration from applications
3339 *
3340 * This function adds the calling process to a driver global queue. When an
3341 * event occurs, SIGIO will be sent to all processes in this queue.
3342 */
3343static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
3344{
3345 struct pmcraid_instance *pinstance;
3346 int rc;
3347
3348 pinstance = (struct pmcraid_instance *)filep->private_data;
3349 mutex_lock(&pinstance->aen_queue_lock);
3350 rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
3351 mutex_unlock(&pinstance->aen_queue_lock);
3352
3353 return rc;
3354}
3355
3356
3357/**
3358 * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
3359 * commands sent over IOCTL interface
3360 *
3361 * @cmd : pointer to struct pmcraid_cmd
3362 * @buflen : length of the request buffer
3363 * @direction : data transfer direction
3364 *
3365 * Return value
af901ca1 3366 * 0 on success, non-zero error code on failure
89a36810
AR
3367 */
3368static int pmcraid_build_passthrough_ioadls(
3369 struct pmcraid_cmd *cmd,
3370 int buflen,
3371 int direction
3372)
3373{
3374 struct pmcraid_sglist *sglist = NULL;
3375 struct scatterlist *sg = NULL;
3376 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
3377 struct pmcraid_ioadl_desc *ioadl;
3378 int i;
3379
3380 sglist = pmcraid_alloc_sglist(buflen);
3381
3382 if (!sglist) {
3383 pmcraid_err("can't allocate memory for passthrough SGls\n");
3384 return -ENOMEM;
3385 }
3386
3387 sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
3388 sglist->scatterlist,
3389 sglist->num_sg, direction);
3390
3391 if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
3392 dev_err(&cmd->drv_inst->pdev->dev,
3393 "Failed to map passthrough buffer!\n");
3394 pmcraid_free_sglist(sglist);
3395 return -EIO;
3396 }
3397
3398 cmd->sglist = sglist;
3399 ioarcb->request_flags0 |= NO_LINK_DESCS;
3400
3401 ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
3402
3403 /* Initialize IOADL descriptor addresses */
3404 for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
3405 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
3406 ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
3407 ioadl[i].flags = 0;
3408 }
3409
3410 /* setup the last descriptor */
88197966 3411 ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
89a36810
AR
3412
3413 return 0;
3414}
3415
3416
3417/**
3418 * pmcraid_release_passthrough_ioadls - release passthrough ioadls
3419 *
3420 * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
3421 * @buflen: size of the request buffer
3422 * @direction: data transfer direction
3423 *
3424 * Return value
af901ca1 3425 * 0 on success, non-zero error code on failure
89a36810
AR
3426 */
3427static void pmcraid_release_passthrough_ioadls(
3428 struct pmcraid_cmd *cmd,
3429 int buflen,
3430 int direction
3431)
3432{
3433 struct pmcraid_sglist *sglist = cmd->sglist;
3434
3435 if (buflen > 0) {
3436 pci_unmap_sg(cmd->drv_inst->pdev,
3437 sglist->scatterlist,
3438 sglist->num_sg,
3439 direction);
3440 pmcraid_free_sglist(sglist);
3441 cmd->sglist = NULL;
3442 }
3443}
3444
3445/**
3446 * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
3447 *
3448 * @pinstance: pointer to adapter instance structure
3449 * @cmd: ioctl code
3450 * @arg: pointer to pmcraid_passthrough_buffer user buffer
3451 *
3452 * Return value
af901ca1 3453 * 0 on success, non-zero error code on failure
89a36810
AR
3454 */
3455static long pmcraid_ioctl_passthrough(
3456 struct pmcraid_instance *pinstance,
3457 unsigned int ioctl_cmd,
3458 unsigned int buflen,
3459 unsigned long arg
3460)
3461{
3462 struct pmcraid_passthrough_ioctl_buffer *buffer;
3463 struct pmcraid_ioarcb *ioarcb;
3464 struct pmcraid_cmd *cmd;
3465 struct pmcraid_cmd *cancel_cmd;
3466 unsigned long request_buffer;
3467 unsigned long request_offset;
3468 unsigned long lock_flags;
3469 int request_size;
3470 int buffer_size;
3471 u8 access, direction;
3472 int rc = 0;
3473
3474 /* If IOA reset is in progress, wait 10 secs for reset to complete */
3475 if (pinstance->ioa_reset_in_progress) {
3476 rc = wait_event_interruptible_timeout(
3477 pinstance->reset_wait_q,
3478 !pinstance->ioa_reset_in_progress,
3479 msecs_to_jiffies(10000));
3480
3481 if (!rc)
3482 return -ETIMEDOUT;
3483 else if (rc < 0)
3484 return -ERESTARTSYS;
3485 }
3486
3487 /* If adapter is not in operational state, return error */
3488 if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
3489 pmcraid_err("IOA is not operational\n");
3490 return -ENOTTY;
3491 }
3492
3493 buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
3494 buffer = kmalloc(buffer_size, GFP_KERNEL);
3495
3496 if (!buffer) {
3497 pmcraid_err("no memory for passthrough buffer\n");
3498 return -ENOMEM;
3499 }
3500
3501 request_offset =
3502 offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
3503
3504 request_buffer = arg + request_offset;
3505
3506 rc = __copy_from_user(buffer,
3507 (struct pmcraid_passthrough_ioctl_buffer *) arg,
3508 sizeof(struct pmcraid_passthrough_ioctl_buffer));
3509 if (rc) {
3510 pmcraid_err("ioctl: can't copy passthrough buffer\n");
3511 rc = -EFAULT;
3512 goto out_free_buffer;
3513 }
3514
3515 request_size = buffer->ioarcb.data_transfer_length;
3516
3517 if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
3518 access = VERIFY_READ;
3519 direction = DMA_TO_DEVICE;
3520 } else {
3521 access = VERIFY_WRITE;
3522 direction = DMA_FROM_DEVICE;
3523 }
3524
3525 if (request_size > 0) {
3526 rc = access_ok(access, arg, request_offset + request_size);
3527
3528 if (!rc) {
3529 rc = -EFAULT;
3530 goto out_free_buffer;
3531 }
3532 }
3533
3534 /* check if we have any additional command parameters */
3535 if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
3536 rc = -EINVAL;
3537 goto out_free_buffer;
3538 }
3539
3540 cmd = pmcraid_get_free_cmd(pinstance);
3541
3542 if (!cmd) {
3543 pmcraid_err("free command block is not available\n");
3544 rc = -ENOMEM;
3545 goto out_free_buffer;
3546 }
3547
3548 cmd->scsi_cmd = NULL;
3549 ioarcb = &(cmd->ioa_cb->ioarcb);
3550
3551 /* Copy the user-provided IOARCB stuff field by field */
3552 ioarcb->resource_handle = buffer->ioarcb.resource_handle;
3553 ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
3554 ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
3555 ioarcb->request_type = buffer->ioarcb.request_type;
3556 ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
3557 ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
3558 memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
3559
3560 if (buffer->ioarcb.add_cmd_param_length) {
3561 ioarcb->add_cmd_param_length =
3562 buffer->ioarcb.add_cmd_param_length;
3563 ioarcb->add_cmd_param_offset =
3564 buffer->ioarcb.add_cmd_param_offset;
3565 memcpy(ioarcb->add_data.u.add_cmd_params,
3566 buffer->ioarcb.add_data.u.add_cmd_params,
3567 buffer->ioarcb.add_cmd_param_length);
3568 }
3569
3570 if (request_size) {
3571 rc = pmcraid_build_passthrough_ioadls(cmd,
3572 request_size,
3573 direction);
3574 if (rc) {
3575 pmcraid_err("couldn't build passthrough ioadls\n");
3576 goto out_free_buffer;
3577 }
3578 }
3579
3580 /* If data is being written into the device, copy the data from user
3581 * buffers
3582 */
3583 if (direction == DMA_TO_DEVICE && request_size > 0) {
3584 rc = pmcraid_copy_sglist(cmd->sglist,
3585 request_buffer,
3586 request_size,
3587 direction);
3588 if (rc) {
3589 pmcraid_err("failed to copy user buffer\n");
3590 goto out_free_sglist;
3591 }
3592 }
3593
3594 /* passthrough ioctl is a blocking command so, put the user to sleep
3595 * until timeout. Note that a timeout value of 0 means, do timeout.
3596 */
3597 cmd->cmd_done = pmcraid_internal_done;
3598 init_completion(&cmd->wait_for_completion);
3599 cmd->completion_req = 1;
3600
3601 pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
3602 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3603 cmd->ioa_cb->ioarcb.cdb[0],
3604 le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
3605
3606 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3607 _pmcraid_fire_command(cmd);
3608 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3609
3610 /* If command timeout is specified put caller to wait till that time,
3611 * otherwise it would be blocking wait. If command gets timed out, it
3612 * will be aborted.
3613 */
3614 if (buffer->ioarcb.cmd_timeout == 0) {
3615 wait_for_completion(&cmd->wait_for_completion);
3616 } else if (!wait_for_completion_timeout(
3617 &cmd->wait_for_completion,
3618 msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
3619
3620 pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
3621 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2),
3622 cmd->ioa_cb->ioarcb.cdb[0]);
3623
3624 rc = -ETIMEDOUT;
3625 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3626 cancel_cmd = pmcraid_abort_cmd(cmd);
3627 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3628
3629 if (cancel_cmd) {
3630 wait_for_completion(&cancel_cmd->wait_for_completion);
3631 pmcraid_return_cmd(cancel_cmd);
3632 }
3633
3634 goto out_free_sglist;
3635 }
3636
3637 /* If the command failed for any reason, copy entire IOASA buffer and
3638 * return IOCTL success. If copying IOASA to user-buffer fails, return
3639 * EFAULT
3640 */
3641 if (le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)) {
3642
3643 void *ioasa =
3644 (void *)(arg +
3645 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
3646
3647 pmcraid_info("command failed with %x\n",
3648 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
3649 if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
3650 sizeof(struct pmcraid_ioasa))) {
3651 pmcraid_err("failed to copy ioasa buffer to user\n");
3652 rc = -EFAULT;
3653 }
3654 }
3655 /* If the data transfer was from device, copy the data onto user
3656 * buffers
3657 */
3658 else if (direction == DMA_FROM_DEVICE && request_size > 0) {
3659 rc = pmcraid_copy_sglist(cmd->sglist,
3660 request_buffer,
3661 request_size,
3662 direction);
3663 if (rc) {
3664 pmcraid_err("failed to copy user buffer\n");
3665 rc = -EFAULT;
3666 }
3667 }
3668
3669out_free_sglist:
3670 pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
3671 pmcraid_return_cmd(cmd);
3672
3673out_free_buffer:
3674 kfree(buffer);
3675
3676 return rc;
3677}
3678
3679
3680
3681
3682/**
3683 * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
3684 *
3685 * @pinstance: pointer to adapter instance structure
3686 * @cmd: ioctl command passed in
3687 * @buflen: length of user_buffer
3688 * @user_buffer: user buffer pointer
3689 *
3690 * Return Value
3691 * 0 in case of success, otherwise appropriate error code
3692 */
3693static long pmcraid_ioctl_driver(
3694 struct pmcraid_instance *pinstance,
3695 unsigned int cmd,
3696 unsigned int buflen,
3697 void __user *user_buffer
3698)
3699{
3700 int rc = -ENOSYS;
3701
3702 if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
3703 pmcraid_err("ioctl_driver: access fault in request buffer \n");
3704 return -EFAULT;
3705 }
3706
3707 switch (cmd) {
3708 case PMCRAID_IOCTL_RESET_ADAPTER:
3709 pmcraid_reset_bringup(pinstance);
3710 rc = 0;
3711 break;
3712
3713 default:
3714 break;
3715 }
3716
3717 return rc;
3718}
3719
3720/**
3721 * pmcraid_check_ioctl_buffer - check for proper access to user buffer
3722 *
3723 * @cmd: ioctl command
3724 * @arg: user buffer
3725 * @hdr: pointer to kernel memory for pmcraid_ioctl_header
3726 *
3727 * Return Value
3728 * negetive error code if there are access issues, otherwise zero.
3729 * Upon success, returns ioctl header copied out of user buffer.
3730 */
3731
3732static int pmcraid_check_ioctl_buffer(
3733 int cmd,
3734 void __user *arg,
3735 struct pmcraid_ioctl_header *hdr
3736)
3737{
3738 int rc = 0;
3739 int access = VERIFY_READ;
3740
3741 if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
3742 pmcraid_err("couldn't copy ioctl header from user buffer\n");
3743 return -EFAULT;
3744 }
3745
3746 /* check for valid driver signature */
3747 rc = memcmp(hdr->signature,
3748 PMCRAID_IOCTL_SIGNATURE,
3749 sizeof(hdr->signature));
3750 if (rc) {
3751 pmcraid_err("signature verification failed\n");
3752 return -EINVAL;
3753 }
3754
3755 /* buffer length can't be negetive */
3756 if (hdr->buffer_length < 0) {
3757 pmcraid_err("ioctl: invalid buffer length specified\n");
3758 return -EINVAL;
3759 }
3760
3761 /* check for appropriate buffer access */
3762 if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
3763 access = VERIFY_WRITE;
3764
3765 rc = access_ok(access,
3766 (arg + sizeof(struct pmcraid_ioctl_header)),
3767 hdr->buffer_length);
3768 if (!rc) {
3769 pmcraid_err("access failed for user buffer of size %d\n",
3770 hdr->buffer_length);
3771 return -EFAULT;
3772 }
3773
3774 return 0;
3775}
3776
3777/**
3778 * pmcraid_ioctl - char node ioctl entry point
3779 */
3780static long pmcraid_chr_ioctl(
3781 struct file *filep,
3782 unsigned int cmd,
3783 unsigned long arg
3784)
3785{
3786 struct pmcraid_instance *pinstance = NULL;
3787 struct pmcraid_ioctl_header *hdr = NULL;
3788 int retval = -ENOTTY;
3789
3790 hdr = kmalloc(GFP_KERNEL, sizeof(struct pmcraid_ioctl_header));
3791
3792 if (!hdr) {
3793 pmcraid_err("faile to allocate memory for ioctl header\n");
3794 return -ENOMEM;
3795 }
3796
3797 retval = pmcraid_check_ioctl_buffer(cmd, (void *)arg, hdr);
3798
3799 if (retval) {
3800 pmcraid_info("chr_ioctl: header check failed\n");
3801 kfree(hdr);
3802 return retval;
3803 }
3804
3805 pinstance = (struct pmcraid_instance *)filep->private_data;
3806
3807 if (!pinstance) {
3808 pmcraid_info("adapter instance is not found\n");
3809 kfree(hdr);
3810 return -ENOTTY;
3811 }
3812
3813 switch (_IOC_TYPE(cmd)) {
3814
3815 case PMCRAID_PASSTHROUGH_IOCTL:
3816 /* If ioctl code is to download microcode, we need to block
3817 * mid-layer requests.
3818 */
3819 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
3820 scsi_block_requests(pinstance->host);
3821
3822 retval = pmcraid_ioctl_passthrough(pinstance,
3823 cmd,
3824 hdr->buffer_length,
3825 arg);
3826
3827 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
3828 scsi_unblock_requests(pinstance->host);
3829 break;
3830
3831 case PMCRAID_DRIVER_IOCTL:
3832 arg += sizeof(struct pmcraid_ioctl_header);
3833 retval = pmcraid_ioctl_driver(pinstance,
3834 cmd,
3835 hdr->buffer_length,
3836 (void __user *)arg);
3837 break;
3838
3839 default:
3840 retval = -ENOTTY;
3841 break;
3842 }
3843
3844 kfree(hdr);
3845
3846 return retval;
3847}
3848
3849/**
3850 * File operations structure for management interface
3851 */
3852static const struct file_operations pmcraid_fops = {
3853 .owner = THIS_MODULE,
3854 .open = pmcraid_chr_open,
3855 .release = pmcraid_chr_release,
3856 .fasync = pmcraid_chr_fasync,
3857 .unlocked_ioctl = pmcraid_chr_ioctl,
3858#ifdef CONFIG_COMPAT
3859 .compat_ioctl = pmcraid_chr_ioctl,
3860#endif
3861};
3862
3863
3864
3865
3866/**
3867 * pmcraid_show_log_level - Display adapter's error logging level
3868 * @dev: class device struct
3869 * @buf: buffer
3870 *
3871 * Return value:
3872 * number of bytes printed to buffer
3873 */
3874static ssize_t pmcraid_show_log_level(
3875 struct device *dev,
3876 struct device_attribute *attr,
3877 char *buf)
3878{
3879 struct Scsi_Host *shost = class_to_shost(dev);
3880 struct pmcraid_instance *pinstance =
3881 (struct pmcraid_instance *)shost->hostdata;
3882 return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
3883}
3884
3885/**
3886 * pmcraid_store_log_level - Change the adapter's error logging level
3887 * @dev: class device struct
3888 * @buf: buffer
3889 * @count: not used
3890 *
3891 * Return value:
3892 * number of bytes printed to buffer
3893 */
3894static ssize_t pmcraid_store_log_level(
3895 struct device *dev,
3896 struct device_attribute *attr,
3897 const char *buf,
3898 size_t count
3899)
3900{
3901 struct Scsi_Host *shost;
3902 struct pmcraid_instance *pinstance;
3903 unsigned long val;
3904
3905 if (strict_strtoul(buf, 10, &val))
3906 return -EINVAL;
3907 /* log-level should be from 0 to 2 */
3908 if (val > 2)
3909 return -EINVAL;
3910
3911 shost = class_to_shost(dev);
3912 pinstance = (struct pmcraid_instance *)shost->hostdata;
3913 pinstance->current_log_level = val;
3914
3915 return strlen(buf);
3916}
3917
3918static struct device_attribute pmcraid_log_level_attr = {
3919 .attr = {
3920 .name = "log_level",
3921 .mode = S_IRUGO | S_IWUSR,
3922 },
3923 .show = pmcraid_show_log_level,
3924 .store = pmcraid_store_log_level,
3925};
3926
3927/**
3928 * pmcraid_show_drv_version - Display driver version
3929 * @dev: class device struct
3930 * @buf: buffer
3931 *
3932 * Return value:
3933 * number of bytes printed to buffer
3934 */
3935static ssize_t pmcraid_show_drv_version(
3936 struct device *dev,
3937 struct device_attribute *attr,
3938 char *buf
3939)
3940{
3941 return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n",
3942 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
3943}
3944
3945static struct device_attribute pmcraid_driver_version_attr = {
3946 .attr = {
3947 .name = "drv_version",
3948 .mode = S_IRUGO,
3949 },
3950 .show = pmcraid_show_drv_version,
3951};
3952
3953/**
3954 * pmcraid_show_io_adapter_id - Display driver assigned adapter id
3955 * @dev: class device struct
3956 * @buf: buffer
3957 *
3958 * Return value:
3959 * number of bytes printed to buffer
3960 */
3961static ssize_t pmcraid_show_adapter_id(
3962 struct device *dev,
3963 struct device_attribute *attr,
3964 char *buf
3965)
3966{
3967 struct Scsi_Host *shost = class_to_shost(dev);
3968 struct pmcraid_instance *pinstance =
3969 (struct pmcraid_instance *)shost->hostdata;
3970 u32 adapter_id = (pinstance->pdev->bus->number << 8) |
3971 pinstance->pdev->devfn;
3972 u32 aen_group = pmcraid_event_family.id;
3973
3974 return snprintf(buf, PAGE_SIZE,
3975 "adapter id: %d\nminor: %d\naen group: %d\n",
3976 adapter_id, MINOR(pinstance->cdev.dev), aen_group);
3977}
3978
3979static struct device_attribute pmcraid_adapter_id_attr = {
3980 .attr = {
3981 .name = "adapter_id",
3982 .mode = S_IRUGO | S_IWUSR,
3983 },
3984 .show = pmcraid_show_adapter_id,
3985};
3986
3987static struct device_attribute *pmcraid_host_attrs[] = {
3988 &pmcraid_log_level_attr,
3989 &pmcraid_driver_version_attr,
3990 &pmcraid_adapter_id_attr,
3991 NULL,
3992};
3993
3994
3995/* host template structure for pmcraid driver */
3996static struct scsi_host_template pmcraid_host_template = {
3997 .module = THIS_MODULE,
3998 .name = PMCRAID_DRIVER_NAME,
3999 .queuecommand = pmcraid_queuecommand,
4000 .eh_abort_handler = pmcraid_eh_abort_handler,
4001 .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
4002 .eh_target_reset_handler = pmcraid_eh_target_reset_handler,
4003 .eh_device_reset_handler = pmcraid_eh_device_reset_handler,
4004 .eh_host_reset_handler = pmcraid_eh_host_reset_handler,
4005
4006 .slave_alloc = pmcraid_slave_alloc,
4007 .slave_configure = pmcraid_slave_configure,
4008 .slave_destroy = pmcraid_slave_destroy,
4009 .change_queue_depth = pmcraid_change_queue_depth,
4010 .change_queue_type = pmcraid_change_queue_type,
4011 .can_queue = PMCRAID_MAX_IO_CMD,
4012 .this_id = -1,
4013 .sg_tablesize = PMCRAID_MAX_IOADLS,
4014 .max_sectors = PMCRAID_IOA_MAX_SECTORS,
4015 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4016 .use_clustering = ENABLE_CLUSTERING,
4017 .shost_attrs = pmcraid_host_attrs,
4018 .proc_name = PMCRAID_DRIVER_NAME
4019};
4020
4021/**
4022 * pmcraid_isr_common - Common interrupt handler routine
4023 *
4024 * @pinstance: pointer to adapter instance
4025 * @intrs: active interrupts (contents of ioa_host_interrupt register)
4026 * @hrrq_id: Host RRQ index
4027 *
4028 * Return Value
4029 * none
4030 */
4031static void pmcraid_isr_common(
4032 struct pmcraid_instance *pinstance,
4033 u32 intrs,
4034 int hrrq_id
4035)
4036{
4037 u32 intrs_clear =
4038 (intrs & INTRS_CRITICAL_OP_IN_PROGRESS) ? intrs
4039 : INTRS_HRRQ_VALID;
4040 iowrite32(intrs_clear,
4041 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4042 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
4043
4044 /* hrrq valid bit was set, schedule tasklet to handle the response */
4045 if (intrs_clear == INTRS_HRRQ_VALID)
4046 tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
4047}
4048
4049/**
4050 * pmcraid_isr - implements interrupt handling routine
4051 *
4052 * @irq: interrupt vector number
4053 * @dev_id: pointer hrrq_vector
4054 *
4055 * Return Value
4056 * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4057 */
4058static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4059{
4060 struct pmcraid_isr_param *hrrq_vector;
4061 struct pmcraid_instance *pinstance;
4062 unsigned long lock_flags;
4063 u32 intrs;
4064
4065 /* In case of legacy interrupt mode where interrupts are shared across
4066 * isrs, it may be possible that the current interrupt is not from IOA
4067 */
4068 if (!dev_id) {
4069 printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
4070 return IRQ_NONE;
4071 }
4072
4073 hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4074 pinstance = hrrq_vector->drv_inst;
4075
4076 /* Acquire the lock (currently host_lock) while processing interrupts.
4077 * This interval is small as most of the response processing is done by
4078 * tasklet without the lock.
4079 */
4080 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
4081 intrs = pmcraid_read_interrupts(pinstance);
4082
4083 if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) {
4084 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4085 return IRQ_NONE;
4086 }
4087
4088 /* Any error interrupts including unit_check, initiate IOA reset.
4089 * In case of unit check indicate to reset_sequence that IOA unit
4090 * checked and prepare for a dump during reset sequence
4091 */
4092 if (intrs & PMCRAID_ERROR_INTERRUPTS) {
4093
4094 if (intrs & INTRS_IOA_UNIT_CHECK)
4095 pinstance->ioa_unit_check = 1;
4096
4097 iowrite32(intrs,
4098 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4099 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4100 intrs);
4101 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
4102 pmcraid_initiate_reset(pinstance);
4103 } else {
4104 pmcraid_isr_common(pinstance, intrs, hrrq_vector->hrrq_id);
4105 }
4106
4107 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4108
4109 return IRQ_HANDLED;
4110}
4111
4112
4113/**
4114 * pmcraid_worker_function - worker thread function
4115 *
4116 * @workp: pointer to struct work queue
4117 *
4118 * Return Value
4119 * None
4120 */
4121
4122static void pmcraid_worker_function(struct work_struct *workp)
4123{
4124 struct pmcraid_instance *pinstance;
4125 struct pmcraid_resource_entry *res;
4126 struct pmcraid_resource_entry *temp;
4127 struct scsi_device *sdev;
4128 unsigned long lock_flags;
4129 unsigned long host_lock_flags;
4130 u8 bus, target, lun;
4131
4132 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
4133 /* add resources only after host is added into system */
4134 if (!atomic_read(&pinstance->expose_resources))
4135 return;
4136
4137 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
4138 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
4139
4140 if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
4141 sdev = res->scsi_dev;
4142
4143 /* host_lock must be held before calling
4144 * scsi_device_get
4145 */
4146 spin_lock_irqsave(pinstance->host->host_lock,
4147 host_lock_flags);
4148 if (!scsi_device_get(sdev)) {
4149 spin_unlock_irqrestore(
4150 pinstance->host->host_lock,
4151 host_lock_flags);
4152 pmcraid_info("deleting %x from midlayer\n",
4153 res->cfg_entry.resource_address);
4154 list_move_tail(&res->queue,
4155 &pinstance->free_res_q);
4156 spin_unlock_irqrestore(
4157 &pinstance->resource_lock,
4158 lock_flags);
4159 scsi_remove_device(sdev);
4160 scsi_device_put(sdev);
4161 spin_lock_irqsave(&pinstance->resource_lock,
4162 lock_flags);
4163 res->change_detected = 0;
4164 } else {
4165 spin_unlock_irqrestore(
4166 pinstance->host->host_lock,
4167 host_lock_flags);
4168 }
4169 }
4170 }
4171
4172 list_for_each_entry(res, &pinstance->used_res_q, queue) {
4173
4174 if (res->change_detected == RES_CHANGE_ADD) {
4175
4176 if (!pmcraid_expose_resource(&res->cfg_entry))
4177 continue;
4178
4179 if (RES_IS_VSET(res->cfg_entry)) {
4180 bus = PMCRAID_VSET_BUS_ID;
4181 target = res->cfg_entry.unique_flags1;
4182 lun = PMCRAID_VSET_LUN_ID;
4183 } else {
4184 bus = PMCRAID_PHYS_BUS_ID;
4185 target =
4186 RES_TARGET(
4187 res->cfg_entry.resource_address);
4188 lun = RES_LUN(res->cfg_entry.resource_address);
4189 }
4190
4191 res->change_detected = 0;
4192 spin_unlock_irqrestore(&pinstance->resource_lock,
4193 lock_flags);
4194 scsi_add_device(pinstance->host, bus, target, lun);
4195 spin_lock_irqsave(&pinstance->resource_lock,
4196 lock_flags);
4197 }
4198 }
4199
4200 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
4201}
4202
4203/**
4204 * pmcraid_tasklet_function - Tasklet function
4205 *
4206 * @instance: pointer to msix param structure
4207 *
4208 * Return Value
4209 * None
4210 */
4211void pmcraid_tasklet_function(unsigned long instance)
4212{
4213 struct pmcraid_isr_param *hrrq_vector;
4214 struct pmcraid_instance *pinstance;
4215 unsigned long hrrq_lock_flags;
4216 unsigned long pending_lock_flags;
4217 unsigned long host_lock_flags;
4218 spinlock_t *lockp; /* hrrq buffer lock */
4219 int id;
4220 u32 intrs;
4221 __le32 resp;
4222
4223 hrrq_vector = (struct pmcraid_isr_param *)instance;
4224 pinstance = hrrq_vector->drv_inst;
4225 id = hrrq_vector->hrrq_id;
4226 lockp = &(pinstance->hrrq_lock[id]);
4227 intrs = pmcraid_read_interrupts(pinstance);
4228
4229 /* If interrupts was as part of the ioa initialization, clear and mask
4230 * it. Delete the timer and wakeup the reset engine to proceed with
4231 * reset sequence
4232 */
4233 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
4234 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4235 pinstance->int_regs.ioa_host_interrupt_mask_reg);
4236 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4237 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4238
4239 if (pinstance->reset_cmd != NULL) {
4240 del_timer(&pinstance->reset_cmd->timer);
4241 spin_lock_irqsave(pinstance->host->host_lock,
4242 host_lock_flags);
4243 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
4244 spin_unlock_irqrestore(pinstance->host->host_lock,
4245 host_lock_flags);
4246 }
4247 return;
4248 }
4249
4250 /* loop through each of the commands responded by IOA. Each HRRQ buf is
4251 * protected by its own lock. Traversals must be done within this lock
4252 * as there may be multiple tasklets running on multiple CPUs. Note
4253 * that the lock is held just for picking up the response handle and
4254 * manipulating hrrq_curr/toggle_bit values.
4255 */
4256 spin_lock_irqsave(lockp, hrrq_lock_flags);
4257
4258 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4259
4260 while ((resp & HRRQ_TOGGLE_BIT) ==
4261 pinstance->host_toggle_bit[id]) {
4262
4263 int cmd_index = resp >> 2;
4264 struct pmcraid_cmd *cmd = NULL;
4265
4266 if (cmd_index < PMCRAID_MAX_CMD) {
4267 cmd = pinstance->cmd_list[cmd_index];
4268 } else {
4269 /* In case of invalid response handle, initiate IOA
4270 * reset sequence.
4271 */
4272 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4273
4274 pmcraid_err("Invalid response %d initiating reset\n",
4275 cmd_index);
4276
4277 spin_lock_irqsave(pinstance->host->host_lock,
4278 host_lock_flags);
4279 pmcraid_initiate_reset(pinstance);
4280 spin_unlock_irqrestore(pinstance->host->host_lock,
4281 host_lock_flags);
4282
4283 spin_lock_irqsave(lockp, hrrq_lock_flags);
4284 break;
4285 }
4286
4287 if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
4288 pinstance->hrrq_curr[id]++;
4289 } else {
4290 pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
4291 pinstance->host_toggle_bit[id] ^= 1u;
4292 }
4293
4294 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4295
4296 spin_lock_irqsave(&pinstance->pending_pool_lock,
4297 pending_lock_flags);
4298 list_del(&cmd->free_list);
4299 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
4300 pending_lock_flags);
4301 del_timer(&cmd->timer);
4302 atomic_dec(&pinstance->outstanding_cmds);
4303
4304 if (cmd->cmd_done == pmcraid_ioa_reset) {
4305 spin_lock_irqsave(pinstance->host->host_lock,
4306 host_lock_flags);
4307 cmd->cmd_done(cmd);
4308 spin_unlock_irqrestore(pinstance->host->host_lock,
4309 host_lock_flags);
4310 } else if (cmd->cmd_done != NULL) {
4311 cmd->cmd_done(cmd);
4312 }
4313 /* loop over until we are done with all responses */
4314 spin_lock_irqsave(lockp, hrrq_lock_flags);
4315 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4316 }
4317
4318 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4319}
4320
4321/**
4322 * pmcraid_unregister_interrupt_handler - de-register interrupts handlers
4323 * @pinstance: pointer to adapter instance structure
4324 *
4325 * This routine un-registers registered interrupt handler and
4326 * also frees irqs/vectors.
4327 *
4328 * Retun Value
4329 * None
4330 */
4331static
4332void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4333{
4334 free_irq(pinstance->pdev->irq, &(pinstance->hrrq_vector[0]));
4335}
4336
4337/**
4338 * pmcraid_register_interrupt_handler - registers interrupt handler
4339 * @pinstance: pointer to per-adapter instance structure
4340 *
4341 * Return Value
4342 * 0 on success, non-zero error code otherwise.
4343 */
4344static int
4345pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4346{
4347 struct pci_dev *pdev = pinstance->pdev;
4348
4349 pinstance->hrrq_vector[0].hrrq_id = 0;
4350 pinstance->hrrq_vector[0].drv_inst = pinstance;
4351 pinstance->hrrq_vector[0].vector = 0;
4352 pinstance->num_hrrq = 1;
4353 return request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
4354 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
4355}
4356
4357/**
4358 * pmcraid_release_cmd_blocks - release buufers allocated for command blocks
4359 * @pinstance: per adapter instance structure pointer
4360 * @max_index: number of buffer blocks to release
4361 *
4362 * Return Value
4363 * None
4364 */
4365static void
4366pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
4367{
4368 int i;
4369 for (i = 0; i < max_index; i++) {
4370 kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
4371 pinstance->cmd_list[i] = NULL;
4372 }
4373 kmem_cache_destroy(pinstance->cmd_cachep);
4374 pinstance->cmd_cachep = NULL;
4375}
4376
4377/**
4378 * pmcraid_release_control_blocks - releases buffers alloced for control blocks
4379 * @pinstance: pointer to per adapter instance structure
4380 * @max_index: number of buffers (from 0 onwards) to release
4381 *
4382 * This function assumes that the command blocks for which control blocks are
4383 * linked are not released.
4384 *
4385 * Return Value
4386 * None
4387 */
4388static void
4389pmcraid_release_control_blocks(
4390 struct pmcraid_instance *pinstance,
4391 int max_index
4392)
4393{
4394 int i;
4395
4396 if (pinstance->control_pool == NULL)
4397 return;
4398
4399 for (i = 0; i < max_index; i++) {
4400 pci_pool_free(pinstance->control_pool,
4401 pinstance->cmd_list[i]->ioa_cb,
4402 pinstance->cmd_list[i]->ioa_cb_bus_addr);
4403 pinstance->cmd_list[i]->ioa_cb = NULL;
4404 pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
4405 }
4406 pci_pool_destroy(pinstance->control_pool);
4407 pinstance->control_pool = NULL;
4408}
4409
4410/**
4411 * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
4412 * @pinstance - pointer to per adapter instance structure
4413 *
4414 * Allocates memory for command blocks using kernel slab allocator.
4415 *
4416 * Return Value
4417 * 0 in case of success; -ENOMEM in case of failure
4418 */
4419static int __devinit
4420pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
4421{
4422 int i;
4423
4424 sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
4425 pinstance->host->unique_id);
4426
4427
4428 pinstance->cmd_cachep = kmem_cache_create(
4429 pinstance->cmd_pool_name,
4430 sizeof(struct pmcraid_cmd), 0,
4431 SLAB_HWCACHE_ALIGN, NULL);
4432 if (!pinstance->cmd_cachep)
4433 return -ENOMEM;
4434
4435 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4436 pinstance->cmd_list[i] =
4437 kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
4438 if (!pinstance->cmd_list[i]) {
4439 pmcraid_release_cmd_blocks(pinstance, i);
4440 return -ENOMEM;
4441 }
4442 }
4443 return 0;
4444}
4445
4446/**
4447 * pmcraid_allocate_control_blocks - allocates memory control blocks
4448 * @pinstance : pointer to per adapter instance structure
4449 *
4450 * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
4451 * and IOASAs. This is called after command blocks are already allocated.
4452 *
4453 * Return Value
4454 * 0 in case it can allocate all control blocks, otherwise -ENOMEM
4455 */
4456static int __devinit
4457pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
4458{
4459 int i;
4460
4461 sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
4462 pinstance->host->unique_id);
4463
4464 pinstance->control_pool =
4465 pci_pool_create(pinstance->ctl_pool_name,
4466 pinstance->pdev,
4467 sizeof(struct pmcraid_control_block),
4468 PMCRAID_IOARCB_ALIGNMENT, 0);
4469
4470 if (!pinstance->control_pool)
4471 return -ENOMEM;
4472
4473 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4474 pinstance->cmd_list[i]->ioa_cb =
4475 pci_pool_alloc(
4476 pinstance->control_pool,
4477 GFP_KERNEL,
4478 &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
4479
4480 if (!pinstance->cmd_list[i]->ioa_cb) {
4481 pmcraid_release_control_blocks(pinstance, i);
4482 return -ENOMEM;
4483 }
4484 memset(pinstance->cmd_list[i]->ioa_cb, 0,
4485 sizeof(struct pmcraid_control_block));
4486 }
4487 return 0;
4488}
4489
4490/**
4491 * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
4492 * @pinstance: pointer to per adapter instance structure
4493 * @maxindex: size of hrrq buffer pointer array
4494 *
4495 * Return Value
4496 * None
4497 */
4498static void
4499pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4500{
4501 int i;
4502 for (i = 0; i < maxindex; i++) {
4503
4504 pci_free_consistent(pinstance->pdev,
4505 HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
4506 pinstance->hrrq_start[i],
4507 pinstance->hrrq_start_bus_addr[i]);
4508
4509 /* reset pointers and toggle bit to zeros */
4510 pinstance->hrrq_start[i] = NULL;
4511 pinstance->hrrq_start_bus_addr[i] = 0;
4512 pinstance->host_toggle_bit[i] = 0;
4513 }
4514}
4515
4516/**
4517 * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
4518 * @pinstance: pointer to per adapter instance structure
4519 *
4520 * Return value
4521 * 0 hrrq buffers are allocated, -ENOMEM otherwise.
4522 */
4523static int __devinit
4524pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4525{
4526 int i;
4527 int buf_count = PMCRAID_MAX_CMD / pinstance->num_hrrq;
4528
4529 for (i = 0; i < pinstance->num_hrrq; i++) {
4530 int buffer_size = HRRQ_ENTRY_SIZE * buf_count;
4531
4532 pinstance->hrrq_start[i] =
4533 pci_alloc_consistent(
4534 pinstance->pdev,
4535 buffer_size,
4536 &(pinstance->hrrq_start_bus_addr[i]));
4537
4538 if (pinstance->hrrq_start[i] == 0) {
4539 pmcraid_err("could not allocate host rrq: %d\n", i);
4540 pmcraid_release_host_rrqs(pinstance, i);
4541 return -ENOMEM;
4542 }
4543
4544 memset(pinstance->hrrq_start[i], 0, buffer_size);
4545 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4546 pinstance->hrrq_end[i] =
4547 pinstance->hrrq_start[i] + buf_count - 1;
4548 pinstance->host_toggle_bit[i] = 1;
4549 spin_lock_init(&pinstance->hrrq_lock[i]);
4550 }
4551 return 0;
4552}
4553
4554/**
4555 * pmcraid_release_hcams - release HCAM buffers
4556 *
4557 * @pinstance: pointer to per adapter instance structure
4558 *
4559 * Return value
4560 * none
4561 */
4562static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4563{
4564 if (pinstance->ccn.msg != NULL) {
4565 pci_free_consistent(pinstance->pdev,
4566 PMCRAID_AEN_HDR_SIZE +
4567 sizeof(struct pmcraid_hcam_ccn),
4568 pinstance->ccn.msg,
4569 pinstance->ccn.baddr);
4570
4571 pinstance->ccn.msg = NULL;
4572 pinstance->ccn.hcam = NULL;
4573 pinstance->ccn.baddr = 0;
4574 }
4575
4576 if (pinstance->ldn.msg != NULL) {
4577 pci_free_consistent(pinstance->pdev,
4578 PMCRAID_AEN_HDR_SIZE +
4579 sizeof(struct pmcraid_hcam_ldn),
4580 pinstance->ldn.msg,
4581 pinstance->ldn.baddr);
4582
4583 pinstance->ldn.msg = NULL;
4584 pinstance->ldn.hcam = NULL;
4585 pinstance->ldn.baddr = 0;
4586 }
4587}
4588
4589/**
4590 * pmcraid_allocate_hcams - allocates HCAM buffers
4591 * @pinstance : pointer to per adapter instance structure
4592 *
4593 * Return Value:
4594 * 0 in case of successful allocation, non-zero otherwise
4595 */
4596static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4597{
4598 pinstance->ccn.msg = pci_alloc_consistent(
4599 pinstance->pdev,
4600 PMCRAID_AEN_HDR_SIZE +
4601 sizeof(struct pmcraid_hcam_ccn),
4602 &(pinstance->ccn.baddr));
4603
4604 pinstance->ldn.msg = pci_alloc_consistent(
4605 pinstance->pdev,
4606 PMCRAID_AEN_HDR_SIZE +
4607 sizeof(struct pmcraid_hcam_ldn),
4608 &(pinstance->ldn.baddr));
4609
4610 if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
4611 pmcraid_release_hcams(pinstance);
4612 } else {
4613 pinstance->ccn.hcam =
4614 (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
4615 pinstance->ldn.hcam =
4616 (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
4617
4618 atomic_set(&pinstance->ccn.ignore, 0);
4619 atomic_set(&pinstance->ldn.ignore, 0);
4620 }
4621
4622 return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
4623}
4624
4625/**
4626 * pmcraid_release_config_buffers - release config.table buffers
4627 * @pinstance: pointer to per adapter instance structure
4628 *
4629 * Return Value
4630 * none
4631 */
4632static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
4633{
4634 if (pinstance->cfg_table != NULL &&
4635 pinstance->cfg_table_bus_addr != 0) {
4636 pci_free_consistent(pinstance->pdev,
4637 sizeof(struct pmcraid_config_table),
4638 pinstance->cfg_table,
4639 pinstance->cfg_table_bus_addr);
4640 pinstance->cfg_table = NULL;
4641 pinstance->cfg_table_bus_addr = 0;
4642 }
4643
4644 if (pinstance->res_entries != NULL) {
4645 int i;
4646
4647 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4648 list_del(&pinstance->res_entries[i].queue);
4649 kfree(pinstance->res_entries);
4650 pinstance->res_entries = NULL;
4651 }
4652
4653 pmcraid_release_hcams(pinstance);
4654}
4655
4656/**
4657 * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
4658 * @pinstance : pointer to per adapter instance structure
4659 *
4660 * Return Value
4661 * 0 for successful allocation, -ENOMEM for any failure
4662 */
4663static int __devinit
4664pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
4665{
4666 int i;
4667
4668 pinstance->res_entries =
4669 kzalloc(sizeof(struct pmcraid_resource_entry) *
4670 PMCRAID_MAX_RESOURCES, GFP_KERNEL);
4671
4672 if (NULL == pinstance->res_entries) {
4673 pmcraid_err("failed to allocate memory for resource table\n");
4674 return -ENOMEM;
4675 }
4676
4677 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4678 list_add_tail(&pinstance->res_entries[i].queue,
4679 &pinstance->free_res_q);
4680
4681 pinstance->cfg_table =
4682 pci_alloc_consistent(pinstance->pdev,
4683 sizeof(struct pmcraid_config_table),
4684 &pinstance->cfg_table_bus_addr);
4685
4686 if (NULL == pinstance->cfg_table) {
4687 pmcraid_err("couldn't alloc DMA memory for config table\n");
4688 pmcraid_release_config_buffers(pinstance);
4689 return -ENOMEM;
4690 }
4691
4692 if (pmcraid_allocate_hcams(pinstance)) {
4693 pmcraid_err("could not alloc DMA memory for HCAMS\n");
4694 pmcraid_release_config_buffers(pinstance);
4695 return -ENOMEM;
4696 }
4697
4698 return 0;
4699}
4700
4701/**
4702 * pmcraid_init_tasklets - registers tasklets for response handling
4703 *
4704 * @pinstance: pointer adapter instance structure
4705 *
4706 * Return value
4707 * none
4708 */
4709static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
4710{
4711 int i;
4712 for (i = 0; i < pinstance->num_hrrq; i++)
4713 tasklet_init(&pinstance->isr_tasklet[i],
4714 pmcraid_tasklet_function,
4715 (unsigned long)&pinstance->hrrq_vector[i]);
4716}
4717
4718/**
4719 * pmcraid_kill_tasklets - destroys tasklets registered for response handling
4720 *
4721 * @pinstance: pointer to adapter instance structure
4722 *
4723 * Return value
4724 * none
4725 */
4726static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
4727{
4728 int i;
4729 for (i = 0; i < pinstance->num_hrrq; i++)
4730 tasklet_kill(&pinstance->isr_tasklet[i]);
4731}
4732
4733/**
4734 * pmcraid_init_buffers - allocates memory and initializes various structures
4735 * @pinstance: pointer to per adapter instance structure
4736 *
4737 * This routine pre-allocates memory based on the type of block as below:
4738 * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
4739 * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
4740 * config-table entries : DMAable memory using pci_alloc_consistent
4741 * HostRRQs : DMAable memory, using pci_alloc_consistent
4742 *
4743 * Return Value
4744 * 0 in case all of the blocks are allocated, -ENOMEM otherwise.
4745 */
4746static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
4747{
4748 int i;
4749
4750 if (pmcraid_allocate_host_rrqs(pinstance)) {
4751 pmcraid_err("couldn't allocate memory for %d host rrqs\n",
4752 pinstance->num_hrrq);
4753 return -ENOMEM;
4754 }
4755
4756 if (pmcraid_allocate_config_buffers(pinstance)) {
4757 pmcraid_err("couldn't allocate memory for config buffers\n");
4758 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4759 return -ENOMEM;
4760 }
4761
4762 if (pmcraid_allocate_cmd_blocks(pinstance)) {
4763 pmcraid_err("couldn't allocate memory for cmd blocks \n");
4764 pmcraid_release_config_buffers(pinstance);
4765 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4766 return -ENOMEM;
4767 }
4768
4769 if (pmcraid_allocate_control_blocks(pinstance)) {
4770 pmcraid_err("couldn't allocate memory control blocks \n");
4771 pmcraid_release_config_buffers(pinstance);
4772 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4773 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4774 return -ENOMEM;
4775 }
4776
4777 /* Initialize all the command blocks and add them to free pool. No
4778 * need to lock (free_pool_lock) as this is done in initialization
4779 * itself
4780 */
4781 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4782 struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
4783 pmcraid_init_cmdblk(cmdp, i);
4784 cmdp->drv_inst = pinstance;
4785 list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
4786 }
4787
4788 return 0;
4789}
4790
4791/**
4792 * pmcraid_reinit_buffers - resets various buffer pointers
4793 * @pinstance: pointer to adapter instance
4794 * Return value
4795 * none
4796 */
4797static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
4798{
4799 int i;
4800 int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
4801
4802 for (i = 0; i < pinstance->num_hrrq; i++) {
4803 memset(pinstance->hrrq_start[i], 0, buffer_size);
4804 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4805 pinstance->hrrq_end[i] =
4806 pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
4807 pinstance->host_toggle_bit[i] = 1;
4808 }
4809}
4810
4811/**
4812 * pmcraid_init_instance - initialize per instance data structure
4813 * @pdev: pointer to pci device structure
4814 * @host: pointer to Scsi_Host structure
4815 * @mapped_pci_addr: memory mapped IOA configuration registers
4816 *
4817 * Return Value
4818 * 0 on success, non-zero in case of any failure
4819 */
4820static int __devinit pmcraid_init_instance(
4821 struct pci_dev *pdev,
4822 struct Scsi_Host *host,
4823 void __iomem *mapped_pci_addr
4824)
4825{
4826 struct pmcraid_instance *pinstance =
4827 (struct pmcraid_instance *)host->hostdata;
4828
4829 pinstance->host = host;
4830 pinstance->pdev = pdev;
4831
4832 /* Initialize register addresses */
4833 pinstance->mapped_dma_addr = mapped_pci_addr;
4834
4835 /* Initialize chip-specific details */
4836 {
4837 struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
4838 struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
4839
4840 pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
4841
4842 pint_regs->ioa_host_interrupt_reg =
4843 mapped_pci_addr + chip_cfg->ioa_host_intr;
4844 pint_regs->ioa_host_interrupt_clr_reg =
4845 mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
4846 pint_regs->host_ioa_interrupt_reg =
4847 mapped_pci_addr + chip_cfg->host_ioa_intr;
4848 pint_regs->host_ioa_interrupt_clr_reg =
4849 mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
4850
4851 /* Current version of firmware exposes interrupt mask set
4852 * and mask clr registers through memory mapped bar0.
4853 */
4854 pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
4855 pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
4856 pint_regs->ioa_host_interrupt_mask_reg =
4857 mapped_pci_addr + chip_cfg->ioa_host_mask;
4858 pint_regs->ioa_host_interrupt_mask_clr_reg =
4859 mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
4860 pint_regs->global_interrupt_mask_reg =
4861 mapped_pci_addr + chip_cfg->global_intr_mask;
4862 };
4863
4864 pinstance->ioa_reset_attempts = 0;
4865 init_waitqueue_head(&pinstance->reset_wait_q);
4866
4867 atomic_set(&pinstance->outstanding_cmds, 0);
4868 atomic_set(&pinstance->expose_resources, 0);
4869
4870 INIT_LIST_HEAD(&pinstance->free_res_q);
4871 INIT_LIST_HEAD(&pinstance->used_res_q);
4872 INIT_LIST_HEAD(&pinstance->free_cmd_pool);
4873 INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
4874
4875 spin_lock_init(&pinstance->free_pool_lock);
4876 spin_lock_init(&pinstance->pending_pool_lock);
4877 spin_lock_init(&pinstance->resource_lock);
4878 mutex_init(&pinstance->aen_queue_lock);
4879
4880 /* Work-queue (Shared) for deferred processing error handling */
4881 INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
4882
4883 /* Initialize the default log_level */
4884 pinstance->current_log_level = pmcraid_log_level;
4885
4886 /* Setup variables required for reset engine */
4887 pinstance->ioa_state = IOA_STATE_UNKNOWN;
4888 pinstance->reset_cmd = NULL;
4889 return 0;
4890}
4891
4892/**
4893 * pmcraid_release_buffers - release per-adapter buffers allocated
4894 *
4895 * @pinstance: pointer to adapter soft state
4896 *
4897 * Return Value
4898 * none
4899 */
4900static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4901{
4902 pmcraid_release_config_buffers(pinstance);
4903 pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
4904 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4905 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4906
4907}
4908
4909/**
4910 * pmcraid_shutdown - shutdown adapter controller.
4911 * @pdev: pci device struct
4912 *
4913 * Issues an adapter shutdown to the card waits for its completion
4914 *
4915 * Return value
4916 * none
4917 */
4918static void pmcraid_shutdown(struct pci_dev *pdev)
4919{
4920 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
4921 pmcraid_reset_bringdown(pinstance);
4922}
4923
4924
4925/**
4926 * pmcraid_get_minor - returns unused minor number from minor number bitmap
4927 */
4928static unsigned short pmcraid_get_minor(void)
4929{
4930 int minor;
4931
4932 minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
4933 __set_bit(minor, pmcraid_minor);
4934 return minor;
4935}
4936
4937/**
4938 * pmcraid_release_minor - releases given minor back to minor number bitmap
4939 */
4940static void pmcraid_release_minor(unsigned short minor)
4941{
4942 __clear_bit(minor, pmcraid_minor);
4943}
4944
4945/**
4946 * pmcraid_setup_chrdev - allocates a minor number and registers a char device
4947 *
4948 * @pinstance: pointer to adapter instance for which to register device
4949 *
4950 * Return value
4951 * 0 in case of success, otherwise non-zero
4952 */
4953static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
4954{
4955 int minor;
4956 int error;
4957
4958 minor = pmcraid_get_minor();
4959 cdev_init(&pinstance->cdev, &pmcraid_fops);
4960 pinstance->cdev.owner = THIS_MODULE;
4961
4962 error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
4963
4964 if (error)
4965 pmcraid_release_minor(minor);
4966 else
4967 device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
4968 NULL, "pmcsas%u", minor);
4969 return error;
4970}
4971
4972/**
4973 * pmcraid_release_chrdev - unregisters per-adapter management interface
4974 *
4975 * @pinstance: pointer to adapter instance structure
4976 *
4977 * Return value
4978 * none
4979 */
4980static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
4981{
4982 pmcraid_release_minor(MINOR(pinstance->cdev.dev));
4983 device_destroy(pmcraid_class,
4984 MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
4985 cdev_del(&pinstance->cdev);
4986}
4987
4988/**
4989 * pmcraid_remove - IOA hot plug remove entry point
4990 * @pdev: pci device struct
4991 *
4992 * Return value
4993 * none
4994 */
4995static void __devexit pmcraid_remove(struct pci_dev *pdev)
4996{
4997 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
4998
4999 /* remove the management interface (/dev file) for this device */
5000 pmcraid_release_chrdev(pinstance);
5001
5002 /* remove host template from scsi midlayer */
5003 scsi_remove_host(pinstance->host);
5004
5005 /* block requests from mid-layer */
5006 scsi_block_requests(pinstance->host);
5007
5008 /* initiate shutdown adapter */
5009 pmcraid_shutdown(pdev);
5010
5011 pmcraid_disable_interrupts(pinstance, ~0);
5012 flush_scheduled_work();
5013
5014 pmcraid_kill_tasklets(pinstance);
5015 pmcraid_unregister_interrupt_handler(pinstance);
5016 pmcraid_release_buffers(pinstance);
5017 iounmap(pinstance->mapped_dma_addr);
5018 pci_release_regions(pdev);
5019 scsi_host_put(pinstance->host);
5020 pci_disable_device(pdev);
5021
5022 return;
5023}
5024
5025#ifdef CONFIG_PM
5026/**
5027 * pmcraid_suspend - driver suspend entry point for power management
5028 * @pdev: PCI device structure
5029 * @state: PCI power state to suspend routine
5030 *
5031 * Return Value - 0 always
5032 */
5033static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
5034{
5035 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5036
5037 pmcraid_shutdown(pdev);
5038 pmcraid_disable_interrupts(pinstance, ~0);
5039 pmcraid_kill_tasklets(pinstance);
5040 pci_set_drvdata(pinstance->pdev, pinstance);
5041 pmcraid_unregister_interrupt_handler(pinstance);
5042 pci_save_state(pdev);
5043 pci_disable_device(pdev);
5044 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5045
5046 return 0;
5047}
5048
5049/**
5050 * pmcraid_resume - driver resume entry point PCI power management
5051 * @pdev: PCI device structure
5052 *
5053 * Return Value - 0 in case of success. Error code in case of any failure
5054 */
5055static int pmcraid_resume(struct pci_dev *pdev)
5056{
5057 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5058 struct Scsi_Host *host = pinstance->host;
5059 int rc;
5060 int hrrqs;
5061
5062 pci_set_power_state(pdev, PCI_D0);
5063 pci_enable_wake(pdev, PCI_D0, 0);
5064 pci_restore_state(pdev);
5065
5066 rc = pci_enable_device(pdev);
5067
5068 if (rc) {
34876402 5069 dev_err(&pdev->dev, "resume: Enable device failed\n");
89a36810
AR
5070 return rc;
5071 }
5072
5073 pci_set_master(pdev);
5074
5075 if ((sizeof(dma_addr_t) == 4) ||
5076 pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5077 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5078
5079 if (rc == 0)
5080 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5081
5082 if (rc != 0) {
34876402 5083 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
89a36810
AR
5084 goto disable_device;
5085 }
5086
5087 atomic_set(&pinstance->outstanding_cmds, 0);
5088 hrrqs = pinstance->num_hrrq;
5089 rc = pmcraid_register_interrupt_handler(pinstance);
5090
5091 if (rc) {
34876402
AR
5092 dev_err(&pdev->dev,
5093 "resume: couldn't register interrupt handlers\n");
89a36810
AR
5094 rc = -ENODEV;
5095 goto release_host;
5096 }
5097
5098 pmcraid_init_tasklets(pinstance);
5099 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5100
5101 /* Start with hard reset sequence which brings up IOA to operational
5102 * state as well as completes the reset sequence.
5103 */
5104 pinstance->ioa_hard_reset = 1;
5105
5106 /* Start IOA firmware initialization and bring card to Operational
5107 * state.
5108 */
5109 if (pmcraid_reset_bringup(pinstance)) {
34876402 5110 dev_err(&pdev->dev, "couldn't initialize IOA \n");
89a36810
AR
5111 rc = -ENODEV;
5112 goto release_tasklets;
5113 }
5114
5115 return 0;
5116
5117release_tasklets:
5118 pmcraid_kill_tasklets(pinstance);
5119 pmcraid_unregister_interrupt_handler(pinstance);
5120
5121release_host:
5122 scsi_host_put(host);
5123
5124disable_device:
5125 pci_disable_device(pdev);
5126
5127 return rc;
5128}
5129
5130#else
5131
5132#define pmcraid_suspend NULL
5133#define pmcraid_resume NULL
5134
5135#endif /* CONFIG_PM */
5136
5137/**
5138 * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
5139 * completion of the ioa reset
5140 * @cmd: pointer to reset command block
5141 */
5142static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
5143{
5144 struct pmcraid_instance *pinstance = cmd->drv_inst;
5145 unsigned long flags;
5146
5147 spin_lock_irqsave(pinstance->host->host_lock, flags);
5148 pmcraid_ioa_reset(cmd);
5149 spin_unlock_irqrestore(pinstance->host->host_lock, flags);
5150 scsi_unblock_requests(pinstance->host);
5151 schedule_work(&pinstance->worker_q);
5152}
5153
5154/**
5155 * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
5156 *
5157 * @cmd: pointer to pmcraid_cmd structure
5158 *
5159 * Return Value
5160 * 0 for success or non-zero for failure cases
5161 */
5162static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
5163{
5164 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5165 void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
5166
5167 pmcraid_reinit_cmdblk(cmd);
5168
5169 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5170 ioarcb->request_type = REQ_TYPE_IOACMD;
5171 ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
5172 ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
5173
5174 /* If this was called as part of resource table reinitialization due to
5175 * lost CCN, it is enough to return the command block back to free pool
5176 * as part of set_supported_devs completion function.
5177 */
5178 if (cmd->drv_inst->reinit_cfg_table) {
5179 cmd->drv_inst->reinit_cfg_table = 0;
5180 cmd->release = 1;
5181 cmd_done = pmcraid_reinit_cfgtable_done;
5182 }
5183
5184 /* we will be done with the reset sequence after set supported devices,
5185 * setup the done function to return the command block back to free
5186 * pool
5187 */
5188 pmcraid_send_cmd(cmd,
5189 cmd_done,
5190 PMCRAID_SET_SUP_DEV_TIMEOUT,
5191 pmcraid_timeout_handler);
5192 return;
5193}
5194
5195/**
5196 * pmcraid_init_res_table - Initialize the resource table
5197 * @cmd: pointer to pmcraid command struct
5198 *
5199 * This function looks through the existing resource table, comparing
5200 * it with the config table. This function will take care of old/new
5201 * devices and schedule adding/removing them from the mid-layer
5202 * as appropriate.
5203 *
5204 * Return value
5205 * None
5206 */
5207static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5208{
5209 struct pmcraid_instance *pinstance = cmd->drv_inst;
5210 struct pmcraid_resource_entry *res, *temp;
5211 struct pmcraid_config_table_entry *cfgte;
5212 unsigned long lock_flags;
5213 int found, rc, i;
5214 LIST_HEAD(old_res);
5215
5216 if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
34876402 5217 pmcraid_err("IOA requires microcode download\n");
89a36810
AR
5218
5219 /* resource list is protected by pinstance->resource_lock.
5220 * init_res_table can be called from probe (user-thread) or runtime
5221 * reset (timer/tasklet)
5222 */
5223 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
5224
5225 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
5226 list_move_tail(&res->queue, &old_res);
5227
5228 for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
5229 cfgte = &pinstance->cfg_table->entries[i];
5230
5231 if (!pmcraid_expose_resource(cfgte))
5232 continue;
5233
5234 found = 0;
5235
5236 /* If this entry was already detected and initialized */
5237 list_for_each_entry_safe(res, temp, &old_res, queue) {
5238
5239 rc = memcmp(&res->cfg_entry.resource_address,
5240 &cfgte->resource_address,
5241 sizeof(cfgte->resource_address));
5242 if (!rc) {
5243 list_move_tail(&res->queue,
5244 &pinstance->used_res_q);
5245 found = 1;
5246 break;
5247 }
5248 }
5249
5250 /* If this is new entry, initialize it and add it the queue */
5251 if (!found) {
5252
5253 if (list_empty(&pinstance->free_res_q)) {
34876402 5254 pmcraid_err("Too many devices attached\n");
89a36810
AR
5255 break;
5256 }
5257
5258 found = 1;
5259 res = list_entry(pinstance->free_res_q.next,
5260 struct pmcraid_resource_entry, queue);
5261
5262 res->scsi_dev = NULL;
5263 res->change_detected = RES_CHANGE_ADD;
5264 res->reset_progress = 0;
5265 list_move_tail(&res->queue, &pinstance->used_res_q);
5266 }
5267
5268 /* copy new configuration table entry details into driver
5269 * maintained resource entry
5270 */
5271 if (found) {
5272 memcpy(&res->cfg_entry, cfgte,
5273 sizeof(struct pmcraid_config_table_entry));
5274 pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
5275 res->cfg_entry.resource_type,
5276 res->cfg_entry.unique_flags1,
5277 le32_to_cpu(res->cfg_entry.resource_address));
5278 }
5279 }
5280
5281 /* Detect any deleted entries, mark them for deletion from mid-layer */
5282 list_for_each_entry_safe(res, temp, &old_res, queue) {
5283
5284 if (res->scsi_dev) {
5285 res->change_detected = RES_CHANGE_DEL;
5286 res->cfg_entry.resource_handle =
5287 PMCRAID_INVALID_RES_HANDLE;
5288 list_move_tail(&res->queue, &pinstance->used_res_q);
5289 } else {
5290 list_move_tail(&res->queue, &pinstance->free_res_q);
5291 }
5292 }
5293
5294 /* release the resource list lock */
5295 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
5296 pmcraid_set_supported_devs(cmd);
5297}
5298
5299/**
5300 * pmcraid_querycfg - Send a Query IOA Config to the adapter.
5301 * @cmd: pointer pmcraid_cmd struct
5302 *
5303 * This function sends a Query IOA Configuration command to the adapter to
5304 * retrieve the IOA configuration table.
5305 *
5306 * Return value:
5307 * none
5308 */
5309static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5310{
5311 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5312 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5313 struct pmcraid_instance *pinstance = cmd->drv_inst;
5314 int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
5315
5316 ioarcb->request_type = REQ_TYPE_IOACMD;
5317 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5318
5319 ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
5320
5321 /* firmware requires 4-byte length field, specified in B.E format */
5322 memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
5323
5324 /* Since entire config table can be described by single IOADL, it can
5325 * be part of IOARCB itself
5326 */
5327 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5328 offsetof(struct pmcraid_ioarcb,
5329 add_data.u.ioadl[0]));
5330 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
5331 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
5332
5333 ioarcb->request_flags0 |= NO_LINK_DESCS;
5334 ioarcb->data_transfer_length =
5335 cpu_to_le32(sizeof(struct pmcraid_config_table));
5336
5337 ioadl = &(ioarcb->add_data.u.ioadl[0]);
88197966 5338 ioadl->flags = IOADL_FLAGS_LAST_DESC;
89a36810
AR
5339 ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
5340 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
5341
5342 pmcraid_send_cmd(cmd, pmcraid_init_res_table,
5343 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5344}
5345
5346
5347/**
5348 * pmcraid_probe - PCI probe entry pointer for PMC MaxRaid controller driver
5349 * @pdev: pointer to pci device structure
5350 * @dev_id: pointer to device ids structure
5351 *
5352 * Return Value
5353 * returns 0 if the device is claimed and successfully configured.
5354 * returns non-zero error code in case of any failure
5355 */
5356static int __devinit pmcraid_probe(
5357 struct pci_dev *pdev,
5358 const struct pci_device_id *dev_id
5359)
5360{
5361 struct pmcraid_instance *pinstance;
5362 struct Scsi_Host *host;
5363 void __iomem *mapped_pci_addr;
5364 int rc = PCIBIOS_SUCCESSFUL;
5365
5366 if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
5367 pmcraid_err
5368 ("maximum number(%d) of supported adapters reached\n",
5369 atomic_read(&pmcraid_adapter_count));
5370 return -ENOMEM;
5371 }
5372
5373 atomic_inc(&pmcraid_adapter_count);
5374 rc = pci_enable_device(pdev);
5375
5376 if (rc) {
5377 dev_err(&pdev->dev, "Cannot enable adapter\n");
5378 atomic_dec(&pmcraid_adapter_count);
5379 return rc;
5380 }
5381
5382 dev_info(&pdev->dev,
5383 "Found new IOA(%x:%x), Total IOA count: %d\n",
5384 pdev->vendor, pdev->device,
5385 atomic_read(&pmcraid_adapter_count));
5386
5387 rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
5388
5389 if (rc < 0) {
5390 dev_err(&pdev->dev,
5391 "Couldn't register memory range of registers\n");
5392 goto out_disable_device;
5393 }
5394
5395 mapped_pci_addr = pci_iomap(pdev, 0, 0);
5396
5397 if (!mapped_pci_addr) {
5398 dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
5399 rc = -ENOMEM;
5400 goto out_release_regions;
5401 }
5402
5403 pci_set_master(pdev);
5404
5405 /* Firmware requires the system bus address of IOARCB to be within
5406 * 32-bit addressable range though it has 64-bit IOARRIN register.
5407 * However, firmware supports 64-bit streaming DMA buffers, whereas
5408 * coherent buffers are to be 32-bit. Since pci_alloc_consistent always
5409 * returns memory within 4GB (if not, change this logic), coherent
5410 * buffers are within firmware acceptible address ranges.
5411 */
5412 if ((sizeof(dma_addr_t) == 4) ||
5413 pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5414 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5415
5416 /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
5417 * bit mask for pci_alloc_consistent to return addresses within 4GB
5418 */
5419 if (rc == 0)
5420 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5421
5422 if (rc != 0) {
5423 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5424 goto cleanup_nomem;
5425 }
5426
5427 host = scsi_host_alloc(&pmcraid_host_template,
5428 sizeof(struct pmcraid_instance));
5429
5430 if (!host) {
5431 dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
5432 rc = -ENOMEM;
5433 goto cleanup_nomem;
5434 }
5435
5436 host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
5437 host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
5438 host->unique_id = host->host_no;
5439 host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
5440 host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
5441
5442 /* zero out entire instance structure */
5443 pinstance = (struct pmcraid_instance *)host->hostdata;
5444 memset(pinstance, 0, sizeof(*pinstance));
5445
5446 pinstance->chip_cfg =
5447 (struct pmcraid_chip_details *)(dev_id->driver_data);
5448
5449 rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
5450
5451 if (rc < 0) {
5452 dev_err(&pdev->dev, "failed to initialize adapter instance\n");
5453 goto out_scsi_host_put;
5454 }
5455
5456 pci_set_drvdata(pdev, pinstance);
5457
5458 /* Save PCI config-space for use following the reset */
5459 rc = pci_save_state(pinstance->pdev);
5460
5461 if (rc != 0) {
5462 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5463 goto out_scsi_host_put;
5464 }
5465
5466 pmcraid_disable_interrupts(pinstance, ~0);
5467
5468 rc = pmcraid_register_interrupt_handler(pinstance);
5469
5470 if (rc) {
34876402 5471 dev_err(&pdev->dev, "couldn't register interrupt handler\n");
89a36810
AR
5472 goto out_scsi_host_put;
5473 }
5474
5475 pmcraid_init_tasklets(pinstance);
5476
5477 /* allocate verious buffers used by LLD.*/
5478 rc = pmcraid_init_buffers(pinstance);
5479
5480 if (rc) {
5481 pmcraid_err("couldn't allocate memory blocks\n");
5482 goto out_unregister_isr;
5483 }
5484
5485 /* check the reset type required */
5486 pmcraid_reset_type(pinstance);
5487
5488 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5489
5490 /* Start IOA firmware initialization and bring card to Operational
5491 * state.
5492 */
5493 pmcraid_info("starting IOA initialization sequence\n");
5494 if (pmcraid_reset_bringup(pinstance)) {
34876402 5495 dev_err(&pdev->dev, "couldn't initialize IOA \n");
89a36810
AR
5496 rc = 1;
5497 goto out_release_bufs;
5498 }
5499
5500 /* Add adapter instance into mid-layer list */
5501 rc = scsi_add_host(pinstance->host, &pdev->dev);
5502 if (rc != 0) {
5503 pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
5504 goto out_release_bufs;
5505 }
5506
5507 scsi_scan_host(pinstance->host);
5508
5509 rc = pmcraid_setup_chrdev(pinstance);
5510
5511 if (rc != 0) {
5512 pmcraid_err("couldn't create mgmt interface, error: %x\n",
5513 rc);
5514 goto out_remove_host;
5515 }
5516
5517 /* Schedule worker thread to handle CCN and take care of adding and
5518 * removing devices to OS
5519 */
5520 atomic_set(&pinstance->expose_resources, 1);
5521 schedule_work(&pinstance->worker_q);
5522 return rc;
5523
5524out_remove_host:
5525 scsi_remove_host(host);
5526
5527out_release_bufs:
5528 pmcraid_release_buffers(pinstance);
5529
5530out_unregister_isr:
5531 pmcraid_kill_tasklets(pinstance);
5532 pmcraid_unregister_interrupt_handler(pinstance);
5533
5534out_scsi_host_put:
5535 scsi_host_put(host);
5536
5537cleanup_nomem:
5538 iounmap(mapped_pci_addr);
5539
5540out_release_regions:
5541 pci_release_regions(pdev);
5542
5543out_disable_device:
5544 atomic_dec(&pmcraid_adapter_count);
5545 pci_set_drvdata(pdev, NULL);
5546 pci_disable_device(pdev);
5547 return -ENODEV;
5548}
5549
5550/*
5551 * PCI driver structure of pcmraid driver
5552 */
5553static struct pci_driver pmcraid_driver = {
5554 .name = PMCRAID_DRIVER_NAME,
5555 .id_table = pmcraid_pci_table,
5556 .probe = pmcraid_probe,
5557 .remove = pmcraid_remove,
5558 .suspend = pmcraid_suspend,
5559 .resume = pmcraid_resume,
5560 .shutdown = pmcraid_shutdown
5561};
5562
89a36810
AR
5563/**
5564 * pmcraid_init - module load entry point
5565 */
5566static int __init pmcraid_init(void)
5567{
5568 dev_t dev;
5569 int error;
5570
5571 pmcraid_info("%s Device Driver version: %s %s\n",
5572 PMCRAID_DRIVER_NAME,
5573 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
5574
5575 error = alloc_chrdev_region(&dev, 0,
5576 PMCRAID_MAX_ADAPTERS,
5577 PMCRAID_DEVFILE);
5578
5579 if (error) {
5580 pmcraid_err("failed to get a major number for adapters\n");
5581 goto out_init;
5582 }
5583
5584 pmcraid_major = MAJOR(dev);
5585 pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
5586
5587 if (IS_ERR(pmcraid_class)) {
5588 error = PTR_ERR(pmcraid_class);
5589 pmcraid_err("failed to register with with sysfs, error = %x\n",
5590 error);
5591 goto out_unreg_chrdev;
5592 }
5593
89a36810
AR
5594 error = pmcraid_netlink_init();
5595
5596 if (error)
5597 goto out_unreg_chrdev;
5598
5599 error = pci_register_driver(&pmcraid_driver);
5600
5601 if (error == 0)
5602 goto out_init;
5603
5604 pmcraid_err("failed to register pmcraid driver, error = %x\n",
5605 error);
5606 class_destroy(pmcraid_class);
5607 pmcraid_netlink_release();
5608
5609out_unreg_chrdev:
5610 unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
34876402 5611
89a36810
AR
5612out_init:
5613 return error;
5614}
5615
5616/**
5617 * pmcraid_exit - module unload entry point
5618 */
5619static void __exit pmcraid_exit(void)
5620{
5621 pmcraid_netlink_release();
5622 class_destroy(pmcraid_class);
5623 unregister_chrdev_region(MKDEV(pmcraid_major, 0),
5624 PMCRAID_MAX_ADAPTERS);
5625 pci_unregister_driver(&pmcraid_driver);
5626}
5627
5628module_init(pmcraid_init);
5629module_exit(pmcraid_exit);