[mirror_ubuntu-jammy-kernel.git] / drivers / scsi / megaraid.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2002 LSI Logic Corporation.
7 *
8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
9 * - fixes
10 * - speed-ups (list handling fixes, issued_list, optimizations.)
11 * - lots of cleanups.
12 *
13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
14 * - new-style, hotplug-aware pci probing and scsi registration
15 *
16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
17 * <Seokmann.Ju@lsil.com>
18 *
19 * Description: Linux device driver for LSI Logic MegaRAID controller
20 *
21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
22 * 518, 520, 531, 532
23 *
24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
25 * and others. Please send updates to the mailing list
26 * linux-scsi@vger.kernel.org .
27 */
28
29 #include <linux/mm.h>
30 #include <linux/fs.h>
31 #include <linux/blkdev.h>
32 #include <linux/uaccess.h>
33 #include <asm/io.h>
34 #include <linux/completion.h>
35 #include <linux/delay.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/reboot.h>
39 #include <linux/module.h>
40 #include <linux/list.h>
41 #include <linux/interrupt.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/mutex.h>
46 #include <linux/slab.h>
47 #include <scsi/scsicam.h>
48
49 #include "scsi.h"
50 #include <scsi/scsi_host.h>
51
52 #include "megaraid.h"
53
54 #define MEGARAID_MODULE_VERSION "2.00.4"
55
56 MODULE_AUTHOR ("sju@lsil.com");
57 MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
58 MODULE_LICENSE ("GPL");
59 MODULE_VERSION(MEGARAID_MODULE_VERSION);
60
61 static DEFINE_MUTEX(megadev_mutex);
62 static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
63 module_param(max_cmd_per_lun, uint, 0);
64 MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
65
66 static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
67 module_param(max_sectors_per_io, ushort, 0);
68 MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
69
70
71 static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
72 module_param(max_mbox_busy_wait, ushort, 0);
73 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
74
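/*
 * Doorbell register accessors for memory-mapped controllers: the
 * inbound doorbell lives at offset 0x20 and the outbound doorbell
 * at offset 0x2C of the mapped register space.
 */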
75 #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
76 #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
77 #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
78 #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
79
80 /*
81 * Global variables
82 */
83
84 static int hba_count;
85 static adapter_t *hba_soft_state[MAX_CONTROLLERS];
86 static struct proc_dir_entry *mega_proc_dir_entry;
87
88 /* For controller re-ordering */
89 static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
90
91 static long
92 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
93
94 /*
95 * The File Operations structure for the serial/ioctl interface of the driver
96 */
97 static const struct file_operations megadev_fops = {
98 .owner = THIS_MODULE,
99 .unlocked_ioctl = megadev_unlocked_ioctl,
100 .open = megadev_open,
101 .llseek = noop_llseek,
102 };
103
104 /*
105 * Array of structures for storing the information about the controllers. This
106 * information is sent to the user level applications, when they do an ioctl
107 * for this information.
108 */
109 static struct mcontroller mcontroller[MAX_CONTROLLERS];
110
111 /* The current driver version */
112 static u32 driver_ver = 0x02000000;
113
114 /* major number used by the device for character interface */
115 static int major;
116
117 #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
118
119
120 /*
121 * Debug variable to print some diagnostic messages
122 */
123 static int trace_level;
124
125 /**
126 * mega_setup_mailbox()
127 * @adapter: pointer to our soft state
128 *
129 * Allocates an 8-byte-aligned memory area for the handshake mailbox.
130 */
131 static int
132 mega_setup_mailbox(adapter_t *adapter)
133 {
134 unsigned long align;
135
136 adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
137 sizeof(mbox64_t),
138 &adapter->una_mbox64_dma,
139 GFP_KERNEL);
140
141 if( !adapter->una_mbox64 ) return -1;
142
143 adapter->mbox = &adapter->una_mbox64->mbox;
144
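/*
 * Align the mailbox on a 16-byte boundary inside the allocation;
 * the 64-bit mailbox wrapper (mbox64) starts 8 bytes before the
 * aligned mailbox, and the DMA address below is adjusted by the
 * same offset.
 */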
145 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
146 (~0UL ^ 0xFUL));
147
148 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
149
150 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
151
152 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
153
154 /*
155 * Register the mailbox if the controller is an io-mapped controller
156 */
157 if( adapter->flag & BOARD_IOMAP ) {
158
159 outb(adapter->mbox_dma & 0xFF,
160 adapter->host->io_port + MBOX_PORT0);
161
162 outb((adapter->mbox_dma >> 8) & 0xFF,
163 adapter->host->io_port + MBOX_PORT1);
164
165 outb((adapter->mbox_dma >> 16) & 0xFF,
166 adapter->host->io_port + MBOX_PORT2);
167
168 outb((adapter->mbox_dma >> 24) & 0xFF,
169 adapter->host->io_port + MBOX_PORT3);
170
171 outb(ENABLE_MBOX_BYTE,
172 adapter->host->io_port + ENABLE_MBOX_REGION);
173
174 irq_ack(adapter);
175
176 irq_enable(adapter);
177 }
178
179 return 0;
180 }
181
182
183 /*
184 * mega_query_adapter()
185 * @adapter: pointer to our soft state
186 *
187 * Issue the adapter inquiry commands to the controller and find out
188 * information and parameters about the attached devices
189 */
190 static int
191 mega_query_adapter(adapter_t *adapter)
192 {
193 dma_addr_t prod_info_dma_handle;
194 mega_inquiry3 *inquiry3;
195 u8 raw_mbox[sizeof(struct mbox_out)];
196 mbox_t *mbox;
197 int retval;
198
199 /* Initialize adapter inquiry mailbox */
200
201 mbox = (mbox_t *)raw_mbox;
202
203 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
204 memset(&mbox->m_out, 0, sizeof(raw_mbox));
205
206 /*
207 * Try to issue the Inquiry3 command; if that does not succeed,
208 * issue the MEGA_MBOXCMD_ADAPTERINQ command and update the
209 * enquiry3 structure
210 */
211 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
212
213 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
214
215 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
216 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
217 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
218
219 /* Issue a blocking command to the card */
220 if ((retval = issue_scb_block(adapter, raw_mbox))) {
221 /* the adapter does not support 40ld */
222
223 mraid_ext_inquiry *ext_inq;
224 mraid_inquiry *inq;
225 dma_addr_t dma_handle;
226
227 ext_inq = dma_alloc_coherent(&adapter->dev->dev,
228 sizeof(mraid_ext_inquiry),
229 &dma_handle, GFP_KERNEL);
230
231 if( ext_inq == NULL ) return -1;
232
233 inq = &ext_inq->raid_inq;
234
235 mbox->m_out.xferaddr = (u32)dma_handle;
236
237 /*issue old 0x04 command to adapter */
238 mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
239
240 issue_scb_block(adapter, raw_mbox);
241
242 /*
243 * update Enquiry3 and ProductInfo structures with
244 * mraid_inquiry structure
245 */
246 mega_8_to_40ld(inq, inquiry3,
247 (mega_product_info *)&adapter->product_info);
248
249 dma_free_coherent(&adapter->dev->dev,
250 sizeof(mraid_ext_inquiry), ext_inq,
251 dma_handle);
252
253 } else { /*adapter supports 40ld */
254 adapter->flag |= BOARD_40LD;
255
256 /*
257 * get product_info, which is static information and will be
258 * unchanged
259 */
260 prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
261 (void *)&adapter->product_info,
262 sizeof(mega_product_info),
263 DMA_FROM_DEVICE);
264
265 mbox->m_out.xferaddr = prod_info_dma_handle;
266
267 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
268 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
269
270 if ((retval = issue_scb_block(adapter, raw_mbox)))
271 dev_warn(&adapter->dev->dev,
272 "Product_info cmd failed with error: %d\n",
273 retval);
274
275 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
276 sizeof(mega_product_info), DMA_FROM_DEVICE);
277 }
278
279
280 /*
281 * kernel scans the channels from 0 to <= max_channel
282 */
283 adapter->host->max_channel =
284 adapter->product_info.nchannels + NVIRT_CHAN -1;
285
286 adapter->host->max_id = 16; /* max targets per channel */
287
288 adapter->host->max_lun = 7; /* Up to 7 luns for non-disk devices */
289
290 adapter->host->cmd_per_lun = max_cmd_per_lun;
291
292 adapter->numldrv = inquiry3->num_ldrv;
293
294 adapter->max_cmds = adapter->product_info.max_commands;
295
296 if(adapter->max_cmds > MAX_COMMANDS)
297 adapter->max_cmds = MAX_COMMANDS;
298
299 adapter->host->can_queue = adapter->max_cmds - 1;
300
301 /*
302 * Get the maximum number of scatter-gather elements supported by this
303 * firmware
304 */
305 mega_get_max_sgl(adapter);
306
307 adapter->host->sg_tablesize = adapter->sglen;
308
309 /* use HP firmware and bios version encoding
310 Note: fw_version[0|1] and bios_version[0|1] were originally shifted
311 right 8 bits making them zero. This 0 value was hardcoded to fix
312 sparse warnings. */
313 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
314 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
315 "%c%d%d.%d%d",
316 adapter->product_info.fw_version[2],
317 0,
318 adapter->product_info.fw_version[1] & 0x0f,
319 0,
320 adapter->product_info.fw_version[0] & 0x0f);
321 snprintf(adapter->bios_version, sizeof(adapter->bios_version),
322 "%c%d%d.%d%d",
323 adapter->product_info.bios_version[2],
324 0,
325 adapter->product_info.bios_version[1] & 0x0f,
326 0,
327 adapter->product_info.bios_version[0] & 0x0f);
328 } else {
329 memcpy(adapter->fw_version,
330 (char *)adapter->product_info.fw_version, 4);
331 adapter->fw_version[4] = 0;
332
333 memcpy(adapter->bios_version,
334 (char *)adapter->product_info.bios_version, 4);
335
336 adapter->bios_version[4] = 0;
337 }
338
339 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
340 adapter->fw_version, adapter->bios_version, adapter->numldrv);
341
342 /*
343 * Do we support extended (>10 bytes) cdbs
344 */
345 adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
346 if (adapter->support_ext_cdb)
347 dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
348
349
350 return 0;
351 }
352
353 /**
354 * mega_runpendq()
355 * @adapter: pointer to our soft state
356 *
357 * Runs through the list of pending requests.
358 */
359 static inline void
360 mega_runpendq(adapter_t *adapter)
361 {
362 if(!list_empty(&adapter->pending_list))
363 __mega_runpendq(adapter);
364 }
365
366 /*
367 * megaraid_queue()
368 * @scmd: Issue this scsi command
369 * @done: the callback hook into the scsi mid-layer
370 *
371 * The command queuing entry point for the mid-layer.
372 */
373 static int
374 megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
375 {
376 adapter_t *adapter;
377 scb_t *scb;
378 int busy=0;
379 unsigned long flags;
380
381 adapter = (adapter_t *)scmd->device->host->hostdata;
382
383 scmd->scsi_done = done;
384
385
386 /*
387 * Allocate and build a SCB request.
388 * The busy flag is set if mega_build_cmd() could not allocate an
389 * scb; we return a non-zero status in that case.
390 * NOTE: scb can be NULL even when certain commands complete
391 * successfully, e.g. MODE_SENSE and TEST_UNIT_READY; we return 0
392 * in that case.
393 */
394
395 spin_lock_irqsave(&adapter->lock, flags);
396 scb = mega_build_cmd(adapter, scmd, &busy);
397 if (!scb)
398 goto out;
399
400 scb->state |= SCB_PENDQ;
401 list_add_tail(&scb->list, &adapter->pending_list);
402
403 /*
404 * Check if the HBA is in quiescent state, e.g., during a
405 * delete logical drive operation. If it is, don't run
406 * the pending_list.
407 */
408 if (atomic_read(&adapter->quiescent) == 0)
409 mega_runpendq(adapter);
410
411 busy = 0;
412 out:
413 spin_unlock_irqrestore(&adapter->lock, flags);
414 return busy;
415 }
416
417 static DEF_SCSI_QCMD(megaraid_queue)
418
419 /**
420 * mega_allocate_scb()
421 * @adapter: pointer to our soft state
422 * @cmd: scsi command from the mid-layer
423 *
424 * Allocate a SCB structure. This is the central structure for controller
425 * commands.
426 */
427 static inline scb_t *
428 mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
429 {
430 struct list_head *head = &adapter->free_list;
431 scb_t *scb;
432
433 /* Unlink command from Free List */
434 if( !list_empty(head) ) {
435
436 scb = list_entry(head->next, scb_t, list);
437
438 list_del_init(head->next);
439
440 scb->state = SCB_ACTIVE;
441 scb->cmd = cmd;
442 scb->dma_type = MEGA_DMA_TYPE_NONE;
443
444 return scb;
445 }
446
447 return NULL;
448 }
449
450 /**
451 * mega_get_ldrv_num()
452 * @adapter: pointer to our soft state
453 * @cmd: scsi mid layer command
454 * @channel: channel on the controller
455 *
456 * Calculate the logical drive number based on the information in scsi command
457 * and the channel number.
458 */
459 static inline int
460 mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
461 {
462 int tgt;
463 int ldrv_num;
464
465 tgt = cmd->device->id;
466
467 if ( tgt > adapter->this_id )
468 tgt--; /* we do not get inquiries for the initiator id */
469
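/*
 * Each channel exposes at most 15 usable targets once the
 * initiator id is skipped, so logical drives are numbered
 * (channel * 15) + target before any boot-drive reordering.
 */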
470 ldrv_num = (channel * 15) + tgt;
471
472
473 /*
474 * If we have a logical drive with boot enabled, present it first
475 */
476 if( adapter->boot_ldrv_enabled ) {
477 if( ldrv_num == 0 ) {
478 ldrv_num = adapter->boot_ldrv;
479 }
480 else {
481 if( ldrv_num <= adapter->boot_ldrv ) {
482 ldrv_num--;
483 }
484 }
485 }
486
487 /*
488 * If "delete logical drive" feature is enabled on this controller.
489 * Do only if at least one delete logical drive operation was done.
490 *
491 * Also, after logical drive deletion, instead of logical drive number,
492 * the value returned should be 0x80+logical drive id.
493 *
494 * These is valid only for IO commands.
495 */
496
497 if (adapter->support_random_del && adapter->read_ldidmap )
498 switch (cmd->cmnd[0]) {
499 case READ_6:
500 case WRITE_6:
501 case READ_10:
502 case WRITE_10:
503 ldrv_num += 0x80;
504 }
505
506 return ldrv_num;
507 }
508
509 /**
510 * mega_build_cmd()
511 * @adapter: pointer to our soft state
512 * @cmd: Prepare using this scsi command
513 * @busy: busy flag if no resources
514 *
515 * Prepares a command and scatter gather list for the controller. This routine
516 * also finds out if the command is intended for a logical drive or a
517 * physical device and prepares the controller command accordingly.
518 *
519 * We also re-order the logical drives and physical devices based on their
520 * boot settings.
521 */
522 static scb_t *
523 mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
524 {
525 mega_passthru *pthru;
526 scb_t *scb;
527 mbox_t *mbox;
528 u32 seg;
529 char islogical;
530 int max_ldrv_num;
531 int channel = 0;
532 int target = 0;
533 int ldrv_num = 0; /* logical drive number */
534
535 /*
536 * We know what channels our logical drives are on - mega_find_card()
537 */
538 islogical = adapter->logdrv_chan[cmd->device->channel];
539
540 /*
541 * The theory: if a physical drive is chosen for boot, all the physical
542 * devices are exported before the logical drives; otherwise the physical
543 * devices are pushed after the logical drives, in which case the kernel
544 * sees the physical devices on a virtual channel, which is converted to
545 * the actual channel on the HBA.
546 */
547 if( adapter->boot_pdrv_enabled ) {
548 if( islogical ) {
549 /* logical channel */
550 channel = cmd->device->channel -
551 adapter->product_info.nchannels;
552 }
553 else {
554 /* this is physical channel */
555 channel = cmd->device->channel;
556 target = cmd->device->id;
557
558 /*
559 * To boot from a physical disk, that disk needs to be
560 * exposed first. If both the channels are SCSI, then
561 * booting from the second channel is not allowed.
562 */
563 if( target == 0 ) {
564 target = adapter->boot_pdrv_tgt;
565 }
566 else if( target == adapter->boot_pdrv_tgt ) {
567 target = 0;
568 }
569 }
570 }
571 else {
572 if( islogical ) {
573 /* this is the logical channel */
574 channel = cmd->device->channel;
575 }
576 else {
577 /* physical channel */
578 channel = cmd->device->channel - NVIRT_CHAN;
579 target = cmd->device->id;
580 }
581 }
582
583
584 if(islogical) {
585
586 /* have just LUN 0 for each target on virtual channels */
587 if (cmd->device->lun) {
588 cmd->result = (DID_BAD_TARGET << 16);
589 cmd->scsi_done(cmd);
590 return NULL;
591 }
592
593 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
594
595
596 max_ldrv_num = (adapter->flag & BOARD_40LD) ?
597 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
598
599 /*
600 * max_ldrv_num increases by 0x80 if some logical drive was
601 * deleted.
602 */
603 if(adapter->read_ldidmap)
604 max_ldrv_num += 0x80;
605
606 if(ldrv_num > max_ldrv_num ) {
607 cmd->result = (DID_BAD_TARGET << 16);
608 cmd->scsi_done(cmd);
609 return NULL;
610 }
611
612 }
613 else {
614 if( cmd->device->lun > 7) {
615 /*
616 * Do not support lun >7 for physically accessed
617 * devices
618 */
619 cmd->result = (DID_BAD_TARGET << 16);
620 cmd->scsi_done(cmd);
621 return NULL;
622 }
623 }
624
625 /*
626 *
627 * Logical drive commands
628 *
629 */
630 if(islogical) {
631 switch (cmd->cmnd[0]) {
632 case TEST_UNIT_READY:
633 #if MEGA_HAVE_CLUSTERING
634 /*
635 * Do we support clustering and is the support enabled?
636 * If not, always return success.
637 */
638 if( !adapter->has_cluster ) {
639 cmd->result = (DID_OK << 16);
640 cmd->scsi_done(cmd);
641 return NULL;
642 }
643
644 if(!(scb = mega_allocate_scb(adapter, cmd))) {
645 *busy = 1;
646 return NULL;
647 }
648
649 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
650 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
651 scb->raw_mbox[3] = ldrv_num;
652
653 scb->dma_direction = DMA_NONE;
654
655 return scb;
656 #else
657 cmd->result = (DID_OK << 16);
658 cmd->scsi_done(cmd);
659 return NULL;
660 #endif
661
662 case MODE_SENSE: {
663 char *buf;
664 struct scatterlist *sg;
665
666 sg = scsi_sglist(cmd);
667 buf = kmap_atomic(sg_page(sg)) + sg->offset;
668
669 memset(buf, 0, cmd->cmnd[4]);
670 kunmap_atomic(buf - sg->offset);
671
672 cmd->result = (DID_OK << 16);
673 cmd->scsi_done(cmd);
674 return NULL;
675 }
676
677 case READ_CAPACITY:
678 case INQUIRY:
679
680 if(!(adapter->flag & (1L << cmd->device->channel))) {
681
682 dev_notice(&adapter->dev->dev,
683 "scsi%d: scanning scsi channel %d "
684 "for logical drives\n",
685 adapter->host->host_no,
686 cmd->device->channel);
687
688 adapter->flag |= (1L << cmd->device->channel);
689 }
690
691 /* Allocate a SCB and initialize passthru */
692 if(!(scb = mega_allocate_scb(adapter, cmd))) {
693 *busy = 1;
694 return NULL;
695 }
696 pthru = scb->pthru;
697
698 mbox = (mbox_t *)scb->raw_mbox;
699 memset(mbox, 0, sizeof(scb->raw_mbox));
700 memset(pthru, 0, sizeof(mega_passthru));
701
702 pthru->timeout = 0;
703 pthru->ars = 1;
704 pthru->reqsenselen = 14;
705 pthru->islogical = 1;
706 pthru->logdrv = ldrv_num;
707 pthru->cdblen = cmd->cmd_len;
708 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
709
710 if( adapter->has_64bit_addr ) {
711 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
712 }
713 else {
714 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
715 }
716
717 scb->dma_direction = DMA_FROM_DEVICE;
718
719 pthru->numsgelements = mega_build_sglist(adapter, scb,
720 &pthru->dataxferaddr, &pthru->dataxferlen);
721
722 mbox->m_out.xferaddr = scb->pthru_dma_addr;
723
724 return scb;
725
726 case READ_6:
727 case WRITE_6:
728 case READ_10:
729 case WRITE_10:
730 case READ_12:
731 case WRITE_12:
732
733 /* Allocate a SCB and initialize mailbox */
734 if(!(scb = mega_allocate_scb(adapter, cmd))) {
735 *busy = 1;
736 return NULL;
737 }
738 mbox = (mbox_t *)scb->raw_mbox;
739
740 memset(mbox, 0, sizeof(scb->raw_mbox));
741 mbox->m_out.logdrv = ldrv_num;
742
743 /*
744 * A little hack: 2nd bit is zero for all scsi read
745 * commands and is set for all scsi write commands
746 */
747 if( adapter->has_64bit_addr ) {
748 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
749 MEGA_MBOXCMD_LWRITE64:
750 MEGA_MBOXCMD_LREAD64 ;
751 }
752 else {
753 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
754 MEGA_MBOXCMD_LWRITE:
755 MEGA_MBOXCMD_LREAD ;
756 }
757
758 /*
759 * 6-byte READ(0x08) or WRITE(0x0A) cdb
760 */
761 if( cmd->cmd_len == 6 ) {
762 mbox->m_out.numsectors = (u32) cmd->cmnd[4];
763 mbox->m_out.lba =
764 ((u32)cmd->cmnd[1] << 16) |
765 ((u32)cmd->cmnd[2] << 8) |
766 (u32)cmd->cmnd[3];
767
768 mbox->m_out.lba &= 0x1FFFFF;
769
770 #if MEGA_HAVE_STATS
771 /*
772 * Take modulo 0x80, since the logical drive
773 * number increases by 0x80 when a logical
774 * drive was deleted
775 */
776 if (*cmd->cmnd == READ_6) {
777 adapter->nreads[ldrv_num%0x80]++;
778 adapter->nreadblocks[ldrv_num%0x80] +=
779 mbox->m_out.numsectors;
780 } else {
781 adapter->nwrites[ldrv_num%0x80]++;
782 adapter->nwriteblocks[ldrv_num%0x80] +=
783 mbox->m_out.numsectors;
784 }
785 #endif
786 }
787
788 /*
789 * 10-byte READ(0x28) or WRITE(0x2A) cdb
790 */
791 if( cmd->cmd_len == 10 ) {
792 mbox->m_out.numsectors =
793 (u32)cmd->cmnd[8] |
794 ((u32)cmd->cmnd[7] << 8);
795 mbox->m_out.lba =
796 ((u32)cmd->cmnd[2] << 24) |
797 ((u32)cmd->cmnd[3] << 16) |
798 ((u32)cmd->cmnd[4] << 8) |
799 (u32)cmd->cmnd[5];
800
801 #if MEGA_HAVE_STATS
802 if (*cmd->cmnd == READ_10) {
803 adapter->nreads[ldrv_num%0x80]++;
804 adapter->nreadblocks[ldrv_num%0x80] +=
805 mbox->m_out.numsectors;
806 } else {
807 adapter->nwrites[ldrv_num%0x80]++;
808 adapter->nwriteblocks[ldrv_num%0x80] +=
809 mbox->m_out.numsectors;
810 }
811 #endif
812 }
813
814 /*
815 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
816 */
817 if( cmd->cmd_len == 12 ) {
818 mbox->m_out.lba =
819 ((u32)cmd->cmnd[2] << 24) |
820 ((u32)cmd->cmnd[3] << 16) |
821 ((u32)cmd->cmnd[4] << 8) |
822 (u32)cmd->cmnd[5];
823
824 mbox->m_out.numsectors =
825 ((u32)cmd->cmnd[6] << 24) |
826 ((u32)cmd->cmnd[7] << 16) |
827 ((u32)cmd->cmnd[8] << 8) |
828 (u32)cmd->cmnd[9];
829
830 #if MEGA_HAVE_STATS
831 if (*cmd->cmnd == READ_12) {
832 adapter->nreads[ldrv_num%0x80]++;
833 adapter->nreadblocks[ldrv_num%0x80] +=
834 mbox->m_out.numsectors;
835 } else {
836 adapter->nwrites[ldrv_num%0x80]++;
837 adapter->nwriteblocks[ldrv_num%0x80] +=
838 mbox->m_out.numsectors;
839 }
840 #endif
841 }
842
843 /*
844 * If it is a read command
845 */
846 if( (*cmd->cmnd & 0x0F) == 0x08 ) {
847 scb->dma_direction = DMA_FROM_DEVICE;
848 }
849 else {
850 scb->dma_direction = DMA_TO_DEVICE;
851 }
852
853 /* Calculate Scatter-Gather info */
854 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
855 (u32 *)&mbox->m_out.xferaddr, &seg);
856
857 return scb;
858
859 #if MEGA_HAVE_CLUSTERING
860 case RESERVE:
861 case RELEASE:
862
863 /*
864 * Do we support clustering and is the support enabled?
865 */
866 if( ! adapter->has_cluster ) {
867
868 cmd->result = (DID_BAD_TARGET << 16);
869 cmd->scsi_done(cmd);
870 return NULL;
871 }
872
873 /* Allocate a SCB and initialize mailbox */
874 if(!(scb = mega_allocate_scb(adapter, cmd))) {
875 *busy = 1;
876 return NULL;
877 }
878
879 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
880 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
881 MEGA_RESERVE_LD : MEGA_RELEASE_LD;
882
883 scb->raw_mbox[3] = ldrv_num;
884
885 scb->dma_direction = DMA_NONE;
886
887 return scb;
888 #endif
889
890 default:
891 cmd->result = (DID_BAD_TARGET << 16);
892 cmd->scsi_done(cmd);
893 return NULL;
894 }
895 }
896
897 /*
898 * Passthru drive commands
899 */
900 else {
901 /* Allocate a SCB and initialize passthru */
902 if(!(scb = mega_allocate_scb(adapter, cmd))) {
903 *busy = 1;
904 return NULL;
905 }
906
907 mbox = (mbox_t *)scb->raw_mbox;
908 memset(mbox, 0, sizeof(scb->raw_mbox));
909
910 if( adapter->support_ext_cdb ) {
911
912 mega_prepare_extpassthru(adapter, scb, cmd,
913 channel, target);
914
915 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
916
917 mbox->m_out.xferaddr = scb->epthru_dma_addr;
918
919 }
920 else {
921
922 pthru = mega_prepare_passthru(adapter, scb, cmd,
923 channel, target);
924
925 /* Initialize mailbox */
926 if( adapter->has_64bit_addr ) {
927 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
928 }
929 else {
930 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
931 }
932
933 mbox->m_out.xferaddr = scb->pthru_dma_addr;
934
935 }
936 return scb;
937 }
938 return NULL;
939 }
940
941
942 /**
943 * mega_prepare_passthru()
944 * @adapter: pointer to our soft state
945 * @scb: our scsi control block
946 * @cmd: scsi command from the mid-layer
947 * @channel: actual channel on the controller
948 * @target: actual id on the controller.
949 *
950 * prepare a command for the scsi physical devices.
951 */
952 static mega_passthru *
953 mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
954 int channel, int target)
955 {
956 mega_passthru *pthru;
957
958 pthru = scb->pthru;
959 memset(pthru, 0, sizeof (mega_passthru));
960
961 /* 0=6sec/1=60sec/2=10min/3=3hrs */
962 pthru->timeout = 2;
963
964 pthru->ars = 1;
965 pthru->reqsenselen = 14;
966 pthru->islogical = 0;
967
968 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
969
970 pthru->target = (adapter->flag & BOARD_40LD) ?
971 (channel << 4) | target : target;
972
973 pthru->cdblen = cmd->cmd_len;
974 pthru->logdrv = cmd->device->lun;
975
976 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
977
978 /* Not sure about the direction */
979 scb->dma_direction = DMA_BIDIRECTIONAL;
980
981 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
982 switch (cmd->cmnd[0]) {
983 case INQUIRY:
984 case READ_CAPACITY:
985 if(!(adapter->flag & (1L << cmd->device->channel))) {
986
987 dev_notice(&adapter->dev->dev,
988 "scsi%d: scanning scsi channel %d [P%d] "
989 "for physical devices\n",
990 adapter->host->host_no,
991 cmd->device->channel, channel);
992
993 adapter->flag |= (1L << cmd->device->channel);
994 }
995 fallthrough;
996 default:
997 pthru->numsgelements = mega_build_sglist(adapter, scb,
998 &pthru->dataxferaddr, &pthru->dataxferlen);
999 break;
1000 }
1001 return pthru;
1002 }
1003
1004
1005 /**
1006 * mega_prepare_extpassthru()
1007 * @adapter: pointer to our soft state
1008 * @scb: our scsi control block
1009 * @cmd: scsi command from the mid-layer
1010 * @channel: actual channel on the controller
1011 * @target: actual id on the controller.
1012 *
1013 * prepare a command for the scsi physical devices. This routine prepares
1014 * commands for devices which can take extended CDBs (>10 bytes)
1015 */
1016 static mega_ext_passthru *
1017 mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
1018 struct scsi_cmnd *cmd,
1019 int channel, int target)
1020 {
1021 mega_ext_passthru *epthru;
1022
1023 epthru = scb->epthru;
1024 memset(epthru, 0, sizeof(mega_ext_passthru));
1025
1026 /* 0=6sec/1=60sec/2=10min/3=3hrs */
1027 epthru->timeout = 2;
1028
1029 epthru->ars = 1;
1030 epthru->reqsenselen = 14;
1031 epthru->islogical = 0;
1032
1033 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
1034 epthru->target = (adapter->flag & BOARD_40LD) ?
1035 (channel << 4) | target : target;
1036
1037 epthru->cdblen = cmd->cmd_len;
1038 epthru->logdrv = cmd->device->lun;
1039
1040 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
1041
1042 /* Not sure about the direction */
1043 scb->dma_direction = DMA_BIDIRECTIONAL;
1044
1045 switch(cmd->cmnd[0]) {
1046 case INQUIRY:
1047 case READ_CAPACITY:
1048 if(!(adapter->flag & (1L << cmd->device->channel))) {
1049
1050 dev_notice(&adapter->dev->dev,
1051 "scsi%d: scanning scsi channel %d [P%d] "
1052 "for physical devices\n",
1053 adapter->host->host_no,
1054 cmd->device->channel, channel);
1055
1056 adapter->flag |= (1L << cmd->device->channel);
1057 }
1058 fallthrough;
1059 default:
1060 epthru->numsgelements = mega_build_sglist(adapter, scb,
1061 &epthru->dataxferaddr, &epthru->dataxferlen);
1062 break;
1063 }
1064
1065 return epthru;
1066 }
1067
1068 static void
1069 __mega_runpendq(adapter_t *adapter)
1070 {
1071 scb_t *scb;
1072 struct list_head *pos, *next;
1073
1074 /* Issue any pending commands to the card */
1075 list_for_each_safe(pos, next, &adapter->pending_list) {
1076
1077 scb = list_entry(pos, scb_t, list);
1078
1079 if( !(scb->state & SCB_ISSUED) ) {
1080
1081 if( issue_scb(adapter, scb) != 0 )
1082 return;
1083 }
1084 }
1085
1086 return;
1087 }
1088
1089
1090 /**
1091 * issue_scb()
1092 * @adapter: pointer to our soft state
1093 * @scb: scsi control block
1094 *
1095 * Post a command to the card if the mailbox is available, otherwise return
1096 * busy. We also take the scb from the pending list if the mailbox is
1097 * available.
1098 */
1099 static int
1100 issue_scb(adapter_t *adapter, scb_t *scb)
1101 {
1102 volatile mbox64_t *mbox64 = adapter->mbox64;
1103 volatile mbox_t *mbox = adapter->mbox;
1104 unsigned int i = 0;
1105
1106 if(unlikely(mbox->m_in.busy)) {
1107 do {
1108 udelay(1);
1109 i++;
1110 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
1111
1112 if(mbox->m_in.busy) return -1;
1113 }
1114
1115 /* Copy mailbox data into host structure */
1116 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
1117 sizeof(struct mbox_out));
1118
1119 mbox->m_out.cmdid = scb->idx; /* Set cmdid */
1120 mbox->m_in.busy = 1; /* Set busy */
1121
1122
1123 /*
1124 * Increment the pending queue counter
1125 */
1126 atomic_inc(&adapter->pend_cmds);
1127
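/*
 * For 64-bit capable commands the transfer address goes into the
 * mbox64 extension; the 32-bit xferaddr field is set to the
 * 0xFFFFFFFF sentinel (presumably telling the firmware to use the
 * 64-bit segment instead).
 */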
1128 switch (mbox->m_out.cmd) {
1129 case MEGA_MBOXCMD_LREAD64:
1130 case MEGA_MBOXCMD_LWRITE64:
1131 case MEGA_MBOXCMD_PASSTHRU64:
1132 case MEGA_MBOXCMD_EXTPTHRU:
1133 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1134 mbox64->xfer_segment_hi = 0;
1135 mbox->m_out.xferaddr = 0xFFFFFFFF;
1136 break;
1137 default:
1138 mbox64->xfer_segment_lo = 0;
1139 mbox64->xfer_segment_hi = 0;
1140 }
1141
1142 /*
1143 * post the command
1144 */
1145 scb->state |= SCB_ISSUED;
1146
1147 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1148 mbox->m_in.poll = 0;
1149 mbox->m_in.ack = 0;
1150 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1151 }
1152 else {
1153 irq_enable(adapter);
1154 issue_command(adapter);
1155 }
1156
1157 return 0;
1158 }
1159
1160 /*
1161 * Wait until the controller's mailbox is available
1162 */
1163 static inline int
1164 mega_busywait_mbox (adapter_t *adapter)
1165 {
1166 if (adapter->mbox->m_in.busy)
1167 return __mega_busywait_mbox(adapter);
1168 return 0;
1169 }
1170
1171 /**
1172 * issue_scb_block()
1173 * @adapter: pointer to our soft state
1174 * @raw_mbox: the mailbox
1175 *
1176 * Issue a scb in synchronous and non-interrupt mode
1177 */
1178 static int
1179 issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
1180 {
1181 volatile mbox64_t *mbox64 = adapter->mbox64;
1182 volatile mbox_t *mbox = adapter->mbox;
1183 u8 byte;
1184
1185 /* Wait until mailbox is free */
1186 if(mega_busywait_mbox (adapter))
1187 goto bug_blocked_mailbox;
1188
1189 /* Copy mailbox data into host structure */
1190 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
1191 mbox->m_out.cmdid = 0xFE;
1192 mbox->m_in.busy = 1;
1193
1194 switch (raw_mbox[0]) {
1195 case MEGA_MBOXCMD_LREAD64:
1196 case MEGA_MBOXCMD_LWRITE64:
1197 case MEGA_MBOXCMD_PASSTHRU64:
1198 case MEGA_MBOXCMD_EXTPTHRU:
1199 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1200 mbox64->xfer_segment_hi = 0;
1201 mbox->m_out.xferaddr = 0xFFFFFFFF;
1202 break;
1203 default:
1204 mbox64->xfer_segment_lo = 0;
1205 mbox64->xfer_segment_hi = 0;
1206 }
1207
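/*
 * Memory-mapped controllers are polled here: post the mailbox by
 * writing the inbound doorbell with bit 0 set, spin until the
 * firmware fills in numstatus and sets the poll byte to 0x77,
 * then acknowledge with bit 1 and wait for it to clear.
 */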
1208 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1209 mbox->m_in.poll = 0;
1210 mbox->m_in.ack = 0;
1211 mbox->m_in.numstatus = 0xFF;
1212 mbox->m_in.status = 0xFF;
1213 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1214
1215 while((volatile u8)mbox->m_in.numstatus == 0xFF)
1216 cpu_relax();
1217
1218 mbox->m_in.numstatus = 0xFF;
1219
1220 while( (volatile u8)mbox->m_in.poll != 0x77 )
1221 cpu_relax();
1222
1223 mbox->m_in.poll = 0;
1224 mbox->m_in.ack = 0x77;
1225
1226 WRINDOOR(adapter, adapter->mbox_dma | 0x2);
1227
1228 while(RDINDOOR(adapter) & 0x2)
1229 cpu_relax();
1230 }
1231 else {
1232 irq_disable(adapter);
1233 issue_command(adapter);
1234
1235 while (!((byte = irq_state(adapter)) & INTR_VALID))
1236 cpu_relax();
1237
1238 set_irq_state(adapter, byte);
1239 irq_enable(adapter);
1240 irq_ack(adapter);
1241 }
1242
1243 return mbox->m_in.status;
1244
1245 bug_blocked_mailbox:
1246 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
1247 udelay (1000);
1248 return -1;
1249 }
1250
1251
1252 /**
1253 * megaraid_isr_iomapped()
1254 * @irq: irq
1255 * @devp: pointer to our soft state
1256 *
1257 * Interrupt service routine for io-mapped controllers.
1258 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1259 * and service the completed commands.
1260 */
1261 static irqreturn_t
1262 megaraid_isr_iomapped(int irq, void *devp)
1263 {
1264 adapter_t *adapter = devp;
1265 unsigned long flags;
1266 u8 status;
1267 u8 nstatus;
1268 u8 completed[MAX_FIRMWARE_STATUS];
1269 u8 byte;
1270 int handled = 0;
1271
1272
1273 /*
1274 * Loop while the F/W has more commands for us to complete.
1275 */
1276 spin_lock_irqsave(&adapter->lock, flags);
1277
1278 do {
1279 /* Check if a valid interrupt is pending */
1280 byte = irq_state(adapter);
1281 if( (byte & VALID_INTR_BYTE) == 0 ) {
1282 /*
1283 * No more pending commands
1284 */
1285 goto out_unlock;
1286 }
1287 set_irq_state(adapter, byte);
1288
1289 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1290 == 0xFF)
1291 cpu_relax();
1292 adapter->mbox->m_in.numstatus = 0xFF;
1293
1294 status = adapter->mbox->m_in.status;
1295
1296 /*
1297 * decrement the pending queue counter
1298 */
1299 atomic_sub(nstatus, &adapter->pend_cmds);
1300
1301 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1302 nstatus);
1303
1304 /* Acknowledge interrupt */
1305 irq_ack(adapter);
1306
1307 mega_cmd_done(adapter, completed, nstatus, status);
1308
1309 mega_rundoneq(adapter);
1310
1311 handled = 1;
1312
1313 /* Loop through any pending requests */
1314 if(atomic_read(&adapter->quiescent) == 0) {
1315 mega_runpendq(adapter);
1316 }
1317
1318 } while(1);
1319
1320 out_unlock:
1321
1322 spin_unlock_irqrestore(&adapter->lock, flags);
1323
1324 return IRQ_RETVAL(handled);
1325 }
1326
1327
1328 /**
1329 * megaraid_isr_memmapped()
1330 * @irq: irq
1331 * @devp: pointer to our soft state
1332 *
1333 * Interrupt service routine for memory-mapped controllers.
1334 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1335 * and service the completed commands.
1336 */
1337 static irqreturn_t
1338 megaraid_isr_memmapped(int irq, void *devp)
1339 {
1340 adapter_t *adapter = devp;
1341 unsigned long flags;
1342 u8 status;
1343 u32 dword = 0;
1344 u8 nstatus;
1345 u8 completed[MAX_FIRMWARE_STATUS];
1346 int handled = 0;
1347
1348
1349 /*
1350 * Loop while the F/W has more commands for us to complete.
1351 */
1352 spin_lock_irqsave(&adapter->lock, flags);
1353
1354 do {
1355 /* Check if a valid interrupt is pending */
1356 dword = RDOUTDOOR(adapter);
1357 if(dword != 0x10001234) {
1358 /*
1359 * No more pending commands
1360 */
1361 goto out_unlock;
1362 }
1363 WROUTDOOR(adapter, 0x10001234);
1364
1365 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1366 == 0xFF) {
1367 cpu_relax();
1368 }
1369 adapter->mbox->m_in.numstatus = 0xFF;
1370
1371 status = adapter->mbox->m_in.status;
1372
1373 /*
1374 * decrement the pending queue counter
1375 */
1376 atomic_sub(nstatus, &adapter->pend_cmds);
1377
1378 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1379 nstatus);
1380
1381 /* Acknowledge interrupt */
1382 WRINDOOR(adapter, 0x2);
1383
1384 handled = 1;
1385
1386 while( RDINDOOR(adapter) & 0x02 )
1387 cpu_relax();
1388
1389 mega_cmd_done(adapter, completed, nstatus, status);
1390
1391 mega_rundoneq(adapter);
1392
1393 /* Loop through any pending requests */
1394 if(atomic_read(&adapter->quiescent) == 0) {
1395 mega_runpendq(adapter);
1396 }
1397
1398 } while(1);
1399
1400 out_unlock:
1401
1402 spin_unlock_irqrestore(&adapter->lock, flags);
1403
1404 return IRQ_RETVAL(handled);
1405 }
1406 /**
1407 * mega_cmd_done()
1408 * @adapter: pointer to our soft state
1409 * @completed: array of ids of completed commands
1410 * @nstatus: number of completed commands
1411 * @status: status of the last command completed
1412 *
1413 * Complete the commands and call the scsi mid-layer callback hooks.
1414 */
1415 static void
1416 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1417 {
1418 mega_ext_passthru *epthru = NULL;
1419 struct scatterlist *sgl;
1420 struct scsi_cmnd *cmd = NULL;
1421 mega_passthru *pthru = NULL;
1422 mbox_t *mbox = NULL;
1423 u8 c;
1424 scb_t *scb;
1425 int islogical;
1426 int cmdid;
1427 int i;
1428
1429 /*
1430 * for all the commands completed, call the mid-layer callback routine
1431 * and free the scb.
1432 */
1433 for( i = 0; i < nstatus; i++ ) {
1434
1435 cmdid = completed[i];
1436
1437 /*
1438 * Only free SCBs for the commands coming down from the
1439 * mid-layer, not for those which were issued internally.
1440 *
1441 * For internal commands, restore the status returned by the
1442 * firmware so that the user can interpret it.
1443 */
1444 if (cmdid == CMDID_INT_CMDS) {
1445 scb = &adapter->int_scb;
1446
1447 list_del_init(&scb->list);
1448 scb->state = SCB_FREE;
1449
1450 adapter->int_status = status;
1451 complete(&adapter->int_waitq);
1452 } else {
1453 scb = &adapter->scb_list[cmdid];
1454
1455 /*
1456 * Make sure f/w has completed a valid command
1457 */
1458 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
1459 dev_crit(&adapter->dev->dev, "invalid command "
1460 "Id %d, scb->state:%x, scsi cmd:%p\n",
1461 cmdid, scb->state, scb->cmd);
1462
1463 continue;
1464 }
1465
1466 /*
1467 * Was an abort issued for this command?
1468 */
1469 if( scb->state & SCB_ABORT ) {
1470
1471 dev_warn(&adapter->dev->dev,
1472 "aborted cmd [%x] complete\n",
1473 scb->idx);
1474
1475 scb->cmd->result = (DID_ABORT << 16);
1476
1477 list_add_tail(SCSI_LIST(scb->cmd),
1478 &adapter->completed_list);
1479
1480 mega_free_scb(adapter, scb);
1481
1482 continue;
1483 }
1484
1485 /*
1486 * Was a reset issued for this command?
1487 */
1488 if( scb->state & SCB_RESET ) {
1489
1490 dev_warn(&adapter->dev->dev,
1491 "reset cmd [%x] complete\n",
1492 scb->idx);
1493
1494 scb->cmd->result = (DID_RESET << 16);
1495
1496 list_add_tail(SCSI_LIST(scb->cmd),
1497 &adapter->completed_list);
1498
1499 mega_free_scb (adapter, scb);
1500
1501 continue;
1502 }
1503
1504 cmd = scb->cmd;
1505 pthru = scb->pthru;
1506 epthru = scb->epthru;
1507 mbox = (mbox_t *)scb->raw_mbox;
1508
1509 #if MEGA_HAVE_STATS
1510 {
1511
1512 int logdrv = mbox->m_out.logdrv;
1513
1514 islogical = adapter->logdrv_chan[cmd->channel];
1515 /*
1516 * Maintain an error counter for the logical drive.
1517 * Some applications, like an SNMP agent, need such
1518 * statistics.
1519 */
1520 if( status && islogical && (cmd->cmnd[0] == READ_6 ||
1521 cmd->cmnd[0] == READ_10 ||
1522 cmd->cmnd[0] == READ_12)) {
1523 /*
1524 * Logical drive number increases by 0x80 when
1525 * a logical drive is deleted
1526 */
1527 adapter->rd_errors[logdrv%0x80]++;
1528 }
1529
1530 if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
1531 cmd->cmnd[0] == WRITE_10 ||
1532 cmd->cmnd[0] == WRITE_12)) {
1533 /*
1534 * Logical drive number increases by 0x80 when
1535 * a logical drive is deleted
1536 */
1537 adapter->wr_errors[logdrv%0x80]++;
1538 }
1539
1540 }
1541 #endif
1542 }
1543
1544 /*
1545 * Do not report the presence of a hard disk on a RAID channel:
1546 * if an INQUIRY was sent to a physical (non-logical) device and
1547 * the returned device type is a (removable) hard disk, the
1548 * request should return failure! - PJ
1549 */
1550 islogical = adapter->logdrv_chan[cmd->device->channel];
1551 if( cmd->cmnd[0] == INQUIRY && !islogical ) {
1552
1553 sgl = scsi_sglist(cmd);
1554 if( sg_page(sgl) ) {
1555 c = *(unsigned char *) sg_virt(&sgl[0]);
1556 } else {
1557 dev_warn(&adapter->dev->dev, "invalid sg\n");
1558 c = 0;
1559 }
1560
1561 if(IS_RAID_CH(adapter, cmd->device->channel) &&
1562 ((c & 0x1F ) == TYPE_DISK)) {
1563 status = 0xF0;
1564 }
1565 }
1566
1567 /* clear result; otherwise, success returns corrupt value */
1568 cmd->result = 0;
1569
1570 /* Convert MegaRAID status to Linux error code */
1571 switch (status) {
1572 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
1573 cmd->result |= (DID_OK << 16);
1574 break;
1575
1576 case 0x02: /* ERROR_ABORTED, i.e.
1577 SCSI_STATUS_CHECK_CONDITION */
1578
1579 /* set sense_buffer and result fields */
1580 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
1581 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
1582
1583 memcpy(cmd->sense_buffer, pthru->reqsensearea,
1584 14);
1585
1586 cmd->result = SAM_STAT_CHECK_CONDITION;
1587 }
1588 else {
1589 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
1590
1591 memcpy(cmd->sense_buffer,
1592 epthru->reqsensearea, 14);
1593
1594 cmd->result = SAM_STAT_CHECK_CONDITION;
1595 } else
1596 scsi_build_sense(cmd, 0,
1597 ABORTED_COMMAND, 0, 0);
1598 }
1599 break;
1600
1601 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
1602 SCSI_STATUS_BUSY */
1603 cmd->result |= (DID_BUS_BUSY << 16) | status;
1604 break;
1605
1606 default:
1607 #if MEGA_HAVE_CLUSTERING
1608 /*
1609 * If TEST_UNIT_READY fails, we know
1610 * MEGA_RESERVATION_STATUS failed
1611 */
1612 if( cmd->cmnd[0] == TEST_UNIT_READY ) {
1613 cmd->result |= (DID_ERROR << 16) |
1614 SAM_STAT_RESERVATION_CONFLICT;
1615 }
1616 else
1617 /*
1618 * Error code returned is 1 if Reserve or Release
1619 * failed or the input parameter is invalid
1620 */
1621 if( status == 1 &&
1622 (cmd->cmnd[0] == RESERVE ||
1623 cmd->cmnd[0] == RELEASE) ) {
1624
1625 cmd->result |= (DID_ERROR << 16) |
1626 SAM_STAT_RESERVATION_CONFLICT;
1627 }
1628 else
1629 #endif
1630 cmd->result |= (DID_BAD_TARGET << 16)|status;
1631 }
1632
1633 mega_free_scb(adapter, scb);
1634
1635 /* Add Scsi_Command to end of completed queue */
1636 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
1637 }
1638 }
1639
1640
1641 /*
1642 * mega_rundoneq()
1643 *
1644 * Run through the list of completed requests and finish them
1645 */
1646 static void
1647 mega_rundoneq (adapter_t *adapter)
1648 {
1649 struct scsi_cmnd *cmd;
1650 struct list_head *pos;
1651
1652 list_for_each(pos, &adapter->completed_list) {
1653
1654 struct scsi_pointer* spos = (struct scsi_pointer *)pos;
1655
1656 cmd = list_entry(spos, struct scsi_cmnd, SCp);
1657 cmd->scsi_done(cmd);
1658 }
1659
1660 INIT_LIST_HEAD(&adapter->completed_list);
1661 }
1662
1663
1664 /*
1665 * Free a SCB structure
1666 * Note: We assume the scsi command associated with this scb is not freed yet.
1667 */
1668 static void
1669 mega_free_scb(adapter_t *adapter, scb_t *scb)
1670 {
1671 switch( scb->dma_type ) {
1672
1673 case MEGA_DMA_TYPE_NONE:
1674 break;
1675
1676 case MEGA_SGLIST:
1677 scsi_dma_unmap(scb->cmd);
1678 break;
1679 default:
1680 break;
1681 }
1682
1683 /*
1684 * Remove from the pending list
1685 */
1686 list_del_init(&scb->list);
1687
1688 /* Link the scb back into free list */
1689 scb->state = SCB_FREE;
1690 scb->cmd = NULL;
1691
1692 list_add(&scb->list, &adapter->free_list);
1693 }
1694
1695
1696 static int
1697 __mega_busywait_mbox (adapter_t *adapter)
1698 {
1699 volatile mbox_t *mbox = adapter->mbox;
1700 long counter;
1701
1702 for (counter = 0; counter < 10000; counter++) {
1703 if (!mbox->m_in.busy)
1704 return 0;
1705 udelay(100);
1706 cond_resched();
1707 }
1708 return -1; /* give up after 1 second */
1709 }
1710
1711 /*
1712 * Copies the scatter-gather list into the controller's SGLIST
1713 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
1714 */
1715 static int
1716 mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1717 {
1718 struct scatterlist *sg;
1719 struct scsi_cmnd *cmd;
1720 int sgcnt;
1721 int idx;
1722
1723 cmd = scb->cmd;
1724
1725 /*
1726 * Copy Scatter-Gather list info into controller structure.
1727 *
1728 * The number of sg elements returned must not exceed our limit
1729 */
1730 sgcnt = scsi_dma_map(cmd);
1731
1732 scb->dma_type = MEGA_SGLIST;
1733
1734 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
1735
1736 *len = 0;
1737
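/*
 * A single-segment request on a non-64-bit adapter bypasses the
 * SG list: the data buffer address is handed to the firmware
 * directly and zero SG elements are reported.
 */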
1738 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
1739 sg = scsi_sglist(cmd);
1740 scb->dma_h_bulkdata = sg_dma_address(sg);
1741 *buf = (u32)scb->dma_h_bulkdata;
1742 *len = sg_dma_len(sg);
1743 return 0;
1744 }
1745
1746 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1747 if (adapter->has_64bit_addr) {
1748 scb->sgl64[idx].address = sg_dma_address(sg);
1749 *len += scb->sgl64[idx].length = sg_dma_len(sg);
1750 } else {
1751 scb->sgl[idx].address = sg_dma_address(sg);
1752 *len += scb->sgl[idx].length = sg_dma_len(sg);
1753 }
1754 }
1755
1756 /* Reset pointer and length fields */
1757 *buf = scb->sgl_dma_addr;
1758
1759 /* Return count of SG requests */
1760 return sgcnt;
1761 }
1762
1763
1764 /*
1765 * mega_8_to_40ld()
1766 *
1767 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
1768 * Enquiry3 structures for later use
1769 */
1770 static void
1771 mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
1772 mega_product_info *product_info)
1773 {
1774 int i;
1775
1776 product_info->max_commands = inquiry->adapter_info.max_commands;
1777 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
1778 product_info->nchannels = inquiry->adapter_info.nchannels;
1779
1780 for (i = 0; i < 4; i++) {
1781 product_info->fw_version[i] =
1782 inquiry->adapter_info.fw_version[i];
1783
1784 product_info->bios_version[i] =
1785 inquiry->adapter_info.bios_version[i];
1786 }
1787 enquiry3->cache_flush_interval =
1788 inquiry->adapter_info.cache_flush_interval;
1789
1790 product_info->dram_size = inquiry->adapter_info.dram_size;
1791
1792 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
1793
1794 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
1795 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
1796 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
1797 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
1798 }
1799
1800 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
1801 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
1802 }
1803
1804 static inline void
1805 mega_free_sgl(adapter_t *adapter)
1806 {
1807 scb_t *scb;
1808 int i;
1809
1810 for(i = 0; i < adapter->max_cmds; i++) {
1811
1812 scb = &adapter->scb_list[i];
1813
1814 if( scb->sgl64 ) {
1815 dma_free_coherent(&adapter->dev->dev,
1816 sizeof(mega_sgl64) * adapter->sglen,
1817 scb->sgl64, scb->sgl_dma_addr);
1818
1819 scb->sgl64 = NULL;
1820 }
1821
1822 if( scb->pthru ) {
1823 dma_free_coherent(&adapter->dev->dev,
1824 sizeof(mega_passthru), scb->pthru,
1825 scb->pthru_dma_addr);
1826
1827 scb->pthru = NULL;
1828 }
1829
1830 if( scb->epthru ) {
1831 dma_free_coherent(&adapter->dev->dev,
1832 sizeof(mega_ext_passthru),
1833 scb->epthru, scb->epthru_dma_addr);
1834
1835 scb->epthru = NULL;
1836 }
1837
1838 }
1839 }
1840
1841
1842 /*
1843 * Get information about the card/driver
1844 */
1845 const char *
1846 megaraid_info(struct Scsi_Host *host)
1847 {
1848 static char buffer[512];
1849 adapter_t *adapter;
1850
1851 adapter = (adapter_t *)host->hostdata;
1852
1853 sprintf (buffer,
1854 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
1855 adapter->fw_version, adapter->product_info.max_commands,
1856 adapter->host->max_id, adapter->host->max_channel,
1857 (u32)adapter->host->max_lun);
1858 return buffer;
1859 }
1860
1861 /*
1862 * Abort a previous SCSI request. Only commands on the pending list can be
1863 * aborted. All the commands issued to the F/W must complete.
1864 */
1865 static int
1866 megaraid_abort(struct scsi_cmnd *cmd)
1867 {
1868 adapter_t *adapter;
1869 int rval;
1870
1871 adapter = (adapter_t *)cmd->device->host->hostdata;
1872
1873 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
1874
1875 /*
1876 * This is required here so that any completed requests
1877 * are communicated over to the mid-layer.
1878 */
1879 mega_rundoneq(adapter);
1880
1881 return rval;
1882 }
1883
1884
1885 static int
1886 megaraid_reset(struct scsi_cmnd *cmd)
1887 {
1888 adapter_t *adapter;
1889 megacmd_t mc;
1890 int rval;
1891
1892 adapter = (adapter_t *)cmd->device->host->hostdata;
1893
1894 #if MEGA_HAVE_CLUSTERING
1895 mc.cmd = MEGA_CLUSTER_CMD;
1896 mc.opcode = MEGA_RESET_RESERVATIONS;
1897
1898 if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
1899 dev_warn(&adapter->dev->dev, "reservation reset failed\n");
1900 }
1901 else {
1902 dev_info(&adapter->dev->dev, "reservation reset\n");
1903 }
1904 #endif
1905
1906 spin_lock_irq(&adapter->lock);
1907
1908 rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);
1909
1910 /*
1911 * This is required here so that any completed requests
1912 * are communicated over to the mid-layer.
1913 */
1914 mega_rundoneq(adapter);
1915 spin_unlock_irq(&adapter->lock);
1916
1917 return rval;
1918 }
1919
1920 /**
1921 * megaraid_abort_and_reset()
1922 * @adapter: megaraid soft state
1923 * @cmd: scsi command to be aborted or reset
1924 * @aor: abort or reset flag
1925 *
1926 * Try to locate the scsi command in the pending queue. If it is found and
1927 * has not been issued to the controller, abort/reset it. Otherwise return failure.
1928 */
1929 static int
1930 megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
1931 {
1932 struct list_head *pos, *next;
1933 scb_t *scb;
1934
1935 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
1936 (aor == SCB_ABORT)? "ABORTING":"RESET",
1937 cmd->cmnd[0], cmd->device->channel,
1938 cmd->device->id, (u32)cmd->device->lun);
1939
1940 if(list_empty(&adapter->pending_list))
1941 return FAILED;
1942
1943 list_for_each_safe(pos, next, &adapter->pending_list) {
1944
1945 scb = list_entry(pos, scb_t, list);
1946
1947 if (scb->cmd == cmd) { /* Found command */
1948
1949 scb->state |= aor;
1950
1951 /*
1952 * Check if this command has firmware ownership. If
1953 * yes, we cannot reset this command. Whenever f/w
1954 * completes this command, we will return appropriate
1955 * status from ISR.
1956 */
1957 if( scb->state & SCB_ISSUED ) {
1958
1959 dev_warn(&adapter->dev->dev,
1960 "%s[%x], fw owner\n",
1961 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1962 scb->idx);
1963
1964 return FAILED;
1965 }
1966 else {
1967
1968 /*
1969 * Not yet issued! Remove from the pending
1970 * list
1971 */
1972 dev_warn(&adapter->dev->dev,
1973 "%s-[%x], driver owner\n",
1974 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1975 scb->idx);
1976
1977 mega_free_scb(adapter, scb);
1978
1979 if( aor == SCB_ABORT ) {
1980 cmd->result = (DID_ABORT << 16);
1981 }
1982 else {
1983 cmd->result = (DID_RESET << 16);
1984 }
1985
1986 list_add_tail(SCSI_LIST(cmd),
1987 &adapter->completed_list);
1988
1989 return SUCCESS;
1990 }
1991 }
1992 }
1993
1994 return FAILED;
1995 }
1996
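/*
 * Clone the adapter's pci_dev and force a 32-bit DMA mask on the
 * copy, so temporary buffers (such as inquiry data) are allocated
 * below 4GB regardless of the adapter's own DMA mask.
 */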
1997 static inline int
1998 make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
1999 {
2000 *pdev = pci_alloc_dev(NULL);
2001
2002 if( *pdev == NULL ) return -1;
2003
2004 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
2005
2006 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
2007 kfree(*pdev);
2008 return -1;
2009 }
2010
2011 return 0;
2012 }
2013
2014 static inline void
2015 free_local_pdev(struct pci_dev *pdev)
2016 {
2017 kfree(pdev);
2018 }
2019
2020 /**
2021 * mega_allocate_inquiry()
2022 * @dma_handle: handle returned for dma address
2023 * @pdev: handle to pci device
2024 *
2025 * allocates memory for inquiry structure
2026 */
2027 static inline void *
2028 mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
2029 {
2030 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
2031 dma_handle, GFP_KERNEL);
2032 }
2033
2034
2035 static inline void
2036 mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
2037 {
2038 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
2039 dma_handle);
2040 }
2041
2042
2043 #ifdef CONFIG_PROC_FS
2044 /* Following code handles /proc fs */
2045
2046 /**
2047 * proc_show_config()
2048 * @m: Synthetic file construction data
2049 * @v: File iterator
2050 *
2051 * Display configuration information about the controller.
2052 */
2053 static int
2054 proc_show_config(struct seq_file *m, void *v)
2055 {
2056
2057 adapter_t *adapter = m->private;
2058
2059 seq_puts(m, MEGARAID_VERSION);
2060 if(adapter->product_info.product_name[0])
2061 seq_printf(m, "%s\n", adapter->product_info.product_name);
2062
2063 seq_puts(m, "Controller Type: ");
2064
2065 if( adapter->flag & BOARD_MEMMAP )
2066 seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
2067 else
2068 seq_puts(m, "418/428/434\n");
2069
2070 if(adapter->flag & BOARD_40LD)
2071 seq_puts(m, "Controller Supports 40 Logical Drives\n");
2072
2073 if(adapter->flag & BOARD_64BIT)
2074 seq_puts(m, "Controller capable of 64-bit memory addressing\n");
2075 if( adapter->has_64bit_addr )
2076 seq_puts(m, "Controller using 64-bit memory addressing\n");
2077 else
2078 seq_puts(m, "Controller is not using 64-bit memory addressing\n");
2079
2080 seq_printf(m, "Base = %08lx, Irq = %d, ",
2081 adapter->base, adapter->host->irq);
2082
2083 seq_printf(m, "Logical Drives = %d, Channels = %d\n",
2084 adapter->numldrv, adapter->product_info.nchannels);
2085
2086 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
2087 adapter->fw_version, adapter->bios_version,
2088 adapter->product_info.dram_size);
2089
2090 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
2091 adapter->product_info.max_commands, adapter->max_cmds);
2092
2093 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
2094 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
2095 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
2096 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
2097 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
2098 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
2099 seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
2100 seq_printf(m, "quiescent = %d\n",
2101 atomic_read(&adapter->quiescent));
2102 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
2103
2104 seq_puts(m, "\nModule Parameters:\n");
2105 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
2106 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
2107 return 0;
2108 }
2109
2110 /**
2111 * proc_show_stat()
2112 * @m: Synthetic file construction data
2113 * @v: File iterator
2114 *
2115 * Display statistical information about the I/O activity.
2116 */
2117 static int
2118 proc_show_stat(struct seq_file *m, void *v)
2119 {
2120 adapter_t *adapter = m->private;
2121 #if MEGA_HAVE_STATS
2122 int i;
2123 #endif
2124
2125 seq_puts(m, "Statistical Information for this controller\n");
2126 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
2127 #if MEGA_HAVE_STATS
2128 for(i = 0; i < adapter->numldrv; i++) {
2129 seq_printf(m, "Logical Drive %d:\n", i);
2130 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
2131 adapter->nreads[i], adapter->nwrites[i]);
2132 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
2133 adapter->nreadblocks[i], adapter->nwriteblocks[i]);
2134 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
2135 adapter->rd_errors[i], adapter->wr_errors[i]);
2136 }
2137 #else
2138 seq_puts(m, "IO and error counters not compiled in driver.\n");
2139 #endif
2140 return 0;
2141 }
2142
2143
2144 /**
2145 * proc_show_mbox()
2146 * @m: Synthetic file construction data
2147 * @v: File iterator
2148 *
2149 * Display mailbox information for the last command issued. This information
2150 * is good for debugging.
2151 */
2152 static int
2153 proc_show_mbox(struct seq_file *m, void *v)
2154 {
2155 adapter_t *adapter = m->private;
2156 volatile mbox_t *mbox = adapter->mbox;
2157
2158 seq_puts(m, "Contents of Mail Box Structure\n");
2159 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
2160 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
2161 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
2162 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
2163 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
2164 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
2165 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
2166 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
2167 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
2168 return 0;
2169 }
2170
2171
2172 /**
2173 * proc_show_rebuild_rate()
2174 * @m: Synthetic file construction data
2175 * @v: File iterator
2176 *
2177 * Display current rebuild rate
2178 */
2179 static int
2180 proc_show_rebuild_rate(struct seq_file *m, void *v)
2181 {
2182 adapter_t *adapter = m->private;
2183 dma_addr_t dma_handle;
2184 caddr_t inquiry;
2185 struct pci_dev *pdev;
2186
2187 if( make_local_pdev(adapter, &pdev) != 0 )
2188 return 0;
2189
2190 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2191 goto free_pdev;
2192
2193 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2194 seq_puts(m, "Adapter inquiry failed.\n");
2195 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2196 goto free_inquiry;
2197 }
2198
2199 if( adapter->flag & BOARD_40LD )
2200 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2201 ((mega_inquiry3 *)inquiry)->rebuild_rate);
2202 else
2203 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2204 ((mraid_ext_inquiry *)
2205 inquiry)->raid_inq.adapter_info.rebuild_rate);
2206
2207 free_inquiry:
2208 mega_free_inquiry(inquiry, dma_handle, pdev);
2209 free_pdev:
2210 free_local_pdev(pdev);
2211 return 0;
2212 }
2213
2214
2215 /**
2216 * proc_show_battery()
2217 * @m: Synthetic file construction data
2218 * @v: File iterator
2219 *
2220 * Display information about the battery module on the controller.
2221 */
2222 static int
2223 proc_show_battery(struct seq_file *m, void *v)
2224 {
2225 adapter_t *adapter = m->private;
2226 dma_addr_t dma_handle;
2227 caddr_t inquiry;
2228 struct pci_dev *pdev;
2229 u8 battery_status;
2230
2231 if( make_local_pdev(adapter, &pdev) != 0 )
2232 return 0;
2233
2234 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2235 goto free_pdev;
2236
2237 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2238 seq_puts(m, "Adapter inquiry failed.\n");
2239 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2240 goto free_inquiry;
2241 }
2242
2243 if( adapter->flag & BOARD_40LD ) {
2244 battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
2245 }
2246 else {
2247 battery_status = ((mraid_ext_inquiry *)inquiry)->
2248 raid_inq.adapter_info.battery_status;
2249 }
2250
2251 /*
2252 * Decode the battery status
2253 */
2254 seq_printf(m, "Battery Status:[%d]", battery_status);
2255
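 /*
 * MEGA_BATT_CHARGE_DONE is reported as an exact status value, while the
 * remaining conditions below are individual flag bits that may be set in
 * combination.
 */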
2256 if(battery_status == MEGA_BATT_CHARGE_DONE)
2257 seq_puts(m, " Charge Done");
2258
2259 if(battery_status & MEGA_BATT_MODULE_MISSING)
2260 seq_puts(m, " Module Missing");
2261
2262 if(battery_status & MEGA_BATT_LOW_VOLTAGE)
2263 seq_puts(m, " Low Voltage");
2264
2265 if(battery_status & MEGA_BATT_TEMP_HIGH)
2266 seq_puts(m, " Temperature High");
2267
2268 if(battery_status & MEGA_BATT_PACK_MISSING)
2269 seq_puts(m, " Pack Missing");
2270
2271 if(battery_status & MEGA_BATT_CHARGE_INPROG)
2272 seq_puts(m, " Charge In-progress");
2273
2274 if(battery_status & MEGA_BATT_CHARGE_FAIL)
2275 seq_puts(m, " Charge Fail");
2276
2277 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
2278 seq_puts(m, " Cycles Exceeded");
2279
2280 seq_putc(m, '\n');
2281
2282 free_inquiry:
2283 mega_free_inquiry(inquiry, dma_handle, pdev);
2284 free_pdev:
2285 free_local_pdev(pdev);
2286 return 0;
2287 }
2288
2289
2290 /*
2291 * Display scsi inquiry
2292 */
2293 static void
2294 mega_print_inquiry(struct seq_file *m, char *scsi_inq)
2295 {
2296 int i;
2297
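 /*
 * The offsets below follow the standard SCSI INQUIRY data layout: vendor
 * identification at bytes 8-15, product identification at bytes 16-31 and
 * product revision level at bytes 32-35.
 */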
2298 seq_puts(m, " Vendor: ");
2299 seq_write(m, scsi_inq + 8, 8);
2300 seq_puts(m, " Model: ");
2301 seq_write(m, scsi_inq + 16, 16);
2302 seq_puts(m, " Rev: ");
2303 seq_write(m, scsi_inq + 32, 4);
2304 seq_putc(m, '\n');
2305
2306 i = scsi_inq[0] & 0x1f;
2307 seq_printf(m, " Type: %s ", scsi_device_type(i));
2308
2309 seq_printf(m, " ANSI SCSI revision: %02x",
2310 scsi_inq[2] & 0x07);
2311
2312 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
2313 seq_puts(m, " CCS\n");
2314 else
2315 seq_putc(m, '\n');
2316 }
2317
2318 /**
2319 * proc_show_pdrv()
2320 * @m: Synthetic file construction data
2321 * @adapter: pointer to our soft state
2322 * @channel: channel
2323 *
2324 * Display information about the physical drives.
2325 */
2326 static int
2327 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
2328 {
2329 dma_addr_t dma_handle;
2330 char *scsi_inq;
2331 dma_addr_t scsi_inq_dma_handle;
2332 caddr_t inquiry;
2333 struct pci_dev *pdev;
2334 u8 *pdrv_state;
2335 u8 state;
2336 int tgt;
2337 int max_channels;
2338 int i;
2339
2340 if( make_local_pdev(adapter, &pdev) != 0 )
2341 return 0;
2342
2343 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2344 goto free_pdev;
2345
2346 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2347 seq_puts(m, "Adapter inquiry failed.\n");
2348 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2349 goto free_inquiry;
2350 }
2351
2352
2353 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
2354 GFP_KERNEL);
2355 if( scsi_inq == NULL ) {
2356 seq_puts(m, "memory not available for scsi inq.\n");
2357 goto free_inquiry;
2358 }
2359
2360 if( adapter->flag & BOARD_40LD ) {
2361 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
2362 }
2363 else {
2364 pdrv_state = ((mraid_ext_inquiry *)inquiry)->
2365 raid_inq.pdrv_info.pdrv_state;
2366 }
2367
2368 max_channels = adapter->product_info.nchannels;
2369
2370 if( channel >= max_channels ) {
2371 goto free_pci;
2372 }
2373
2374 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
2375
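 /* The state array keeps one byte per device, 16 targets per channel. */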
2376 i = channel*16 + tgt;
2377
2378 state = *(pdrv_state + i);
2379 switch( state & 0x0F ) {
2380 case PDRV_ONLINE:
2381 seq_printf(m, "Channel:%2d Id:%2d State: Online",
2382 channel, tgt);
2383 break;
2384
2385 case PDRV_FAILED:
2386 seq_printf(m, "Channel:%2d Id:%2d State: Failed",
2387 channel, tgt);
2388 break;
2389
2390 case PDRV_RBLD:
2391 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
2392 channel, tgt);
2393 break;
2394
2395 case PDRV_HOTSPARE:
2396 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
2397 channel, tgt);
2398 break;
2399
2400 default:
2401 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
2402 channel, tgt);
2403 break;
2404 }
2405
2406 /*
2407 * This interface displays inquiries for disk drives
 2408 * only. Inquiries for logical drives and non-disk
2409 * devices are available through /proc/scsi/scsi
2410 */
2411 memset(scsi_inq, 0, 256);
2412 if( mega_internal_dev_inquiry(adapter, channel, tgt,
2413 scsi_inq_dma_handle) ||
2414 (scsi_inq[0] & 0x1F) != TYPE_DISK ) {
2415 continue;
2416 }
2417
2418 /*
2419 * Check for overflow. We print less than 240
2420 * characters for inquiry
2421 */
2422 seq_puts(m, ".\n");
2423 mega_print_inquiry(m, scsi_inq);
2424 }
2425
2426 free_pci:
2427 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
2428 free_inquiry:
2429 mega_free_inquiry(inquiry, dma_handle, pdev);
2430 free_pdev:
2431 free_local_pdev(pdev);
2432 return 0;
2433 }
2434
2435 /**
2436 * proc_show_pdrv_ch0()
2437 * @m: Synthetic file construction data
2438 * @v: File iterator
2439 *
2440 * Display information about the physical drives on physical channel 0.
2441 */
2442 static int
2443 proc_show_pdrv_ch0(struct seq_file *m, void *v)
2444 {
2445 return proc_show_pdrv(m, m->private, 0);
2446 }
2447
2448
2449 /**
2450 * proc_show_pdrv_ch1()
2451 * @m: Synthetic file construction data
2452 * @v: File iterator
2453 *
2454 * Display information about the physical drives on physical channel 1.
2455 */
2456 static int
2457 proc_show_pdrv_ch1(struct seq_file *m, void *v)
2458 {
2459 return proc_show_pdrv(m, m->private, 1);
2460 }
2461
2462
2463 /**
2464 * proc_show_pdrv_ch2()
2465 * @m: Synthetic file construction data
2466 * @v: File iterator
2467 *
2468 * Display information about the physical drives on physical channel 2.
2469 */
2470 static int
2471 proc_show_pdrv_ch2(struct seq_file *m, void *v)
2472 {
2473 return proc_show_pdrv(m, m->private, 2);
2474 }
2475
2476
2477 /**
2478 * proc_show_pdrv_ch3()
2479 * @m: Synthetic file construction data
2480 * @v: File iterator
2481 *
2482 * Display information about the physical drives on physical channel 3.
2483 */
2484 static int
2485 proc_show_pdrv_ch3(struct seq_file *m, void *v)
2486 {
2487 return proc_show_pdrv(m, m->private, 3);
2488 }
2489
2490
2491 /**
2492 * proc_show_rdrv()
2493 * @m: Synthetic file construction data
2494 * @adapter: pointer to our soft state
2495 * @start: starting logical drive to display
2496 * @end: ending logical drive to display
2497 *
 2498 * We do not print the inquiry information since it's already available
 2499 * through the /proc/scsi/scsi interface.
2500 */
2501 static int
2502 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
2503 {
2504 dma_addr_t dma_handle;
2505 logdrv_param *lparam;
2506 megacmd_t mc;
2507 char *disk_array;
2508 dma_addr_t disk_array_dma_handle;
2509 caddr_t inquiry;
2510 struct pci_dev *pdev;
2511 u8 *rdrv_state;
2512 int num_ldrv;
2513 u32 array_sz;
2514 int i;
2515
2516 if( make_local_pdev(adapter, &pdev) != 0 )
2517 return 0;
2518
2519 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2520 goto free_pdev;
2521
2522 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2523 seq_puts(m, "Adapter inquiry failed.\n");
2524 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2525 goto free_inquiry;
2526 }
2527
2528 memset(&mc, 0, sizeof(megacmd_t));
2529
2530 if( adapter->flag & BOARD_40LD ) {
2531 array_sz = sizeof(disk_array_40ld);
2532
2533 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
2534
2535 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
2536 }
2537 else {
2538 array_sz = sizeof(disk_array_8ld);
2539
2540 rdrv_state = ((mraid_ext_inquiry *)inquiry)->
2541 raid_inq.logdrv_info.ldrv_state;
2542
2543 num_ldrv = ((mraid_ext_inquiry *)inquiry)->
2544 raid_inq.logdrv_info.num_ldrv;
2545 }
2546
2547 disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
2548 &disk_array_dma_handle, GFP_KERNEL);
2549
2550 if( disk_array == NULL ) {
2551 seq_puts(m, "memory not available.\n");
2552 goto free_inquiry;
2553 }
2554
2555 mc.xferaddr = (u32)disk_array_dma_handle;
2556
2557 if( adapter->flag & BOARD_40LD ) {
2558 mc.cmd = FC_NEW_CONFIG;
2559 mc.opcode = OP_DCMD_READ_CONFIG;
2560
2561 if( mega_internal_command(adapter, &mc, NULL) ) {
2562 seq_puts(m, "40LD read config failed.\n");
2563 goto free_pci;
2564 }
2565
2566 }
2567 else {
2568 mc.cmd = NEW_READ_CONFIG_8LD;
2569
2570 if( mega_internal_command(adapter, &mc, NULL) ) {
2571 mc.cmd = READ_CONFIG_8LD;
2572 if( mega_internal_command(adapter, &mc, NULL) ) {
2573 seq_puts(m, "8LD read config failed.\n");
2574 goto free_pci;
2575 }
2576 }
2577 }
2578
2579 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
2580
2581 if( adapter->flag & BOARD_40LD ) {
2582 lparam =
2583 &((disk_array_40ld *)disk_array)->ldrv[i].lparam;
2584 }
2585 else {
2586 lparam =
2587 &((disk_array_8ld *)disk_array)->ldrv[i].lparam;
2588 }
2589
2590 /*
2591 * Check for overflow. We print less than 240 characters for
2592 * information about each logical drive.
2593 */
2594 seq_printf(m, "Logical drive:%2d:, ", i);
2595
2596 switch( rdrv_state[i] & 0x0F ) {
2597 case RDRV_OFFLINE:
2598 seq_puts(m, "state: offline");
2599 break;
2600 case RDRV_DEGRADED:
2601 seq_puts(m, "state: degraded");
2602 break;
2603 case RDRV_OPTIMAL:
2604 seq_puts(m, "state: optimal");
2605 break;
2606 case RDRV_DELETED:
2607 seq_puts(m, "state: deleted");
2608 break;
2609 default:
2610 seq_puts(m, "state: unknown");
2611 break;
2612 }
2613
2614 /*
2615 * Check if check consistency or initialization is going on
2616 * for this logical drive.
2617 */
2618 if( (rdrv_state[i] & 0xF0) == 0x20 )
2619 seq_puts(m, ", check-consistency in progress");
2620 else if( (rdrv_state[i] & 0xF0) == 0x10 )
2621 seq_puts(m, ", initialization in progress");
2622
2623 seq_putc(m, '\n');
2624
2625 seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
2626 seq_printf(m, "RAID level:%3d, ", lparam->level);
2627 seq_printf(m, "Stripe size:%3d, ",
2628 lparam->stripe_sz ? lparam->stripe_sz/2: 128);
2629 seq_printf(m, "Row size:%3d\n", lparam->row_size);
2630
2631 seq_puts(m, "Read Policy: ");
2632 switch(lparam->read_ahead) {
2633 case NO_READ_AHEAD:
2634 seq_puts(m, "No read ahead, ");
2635 break;
2636 case READ_AHEAD:
2637 seq_puts(m, "Read ahead, ");
2638 break;
2639 case ADAP_READ_AHEAD:
2640 seq_puts(m, "Adaptive, ");
2641 break;
2642
2643 }
2644
2645 seq_puts(m, "Write Policy: ");
2646 switch(lparam->write_mode) {
2647 case WRMODE_WRITE_THRU:
2648 seq_puts(m, "Write thru, ");
2649 break;
2650 case WRMODE_WRITE_BACK:
2651 seq_puts(m, "Write back, ");
2652 break;
2653 }
2654
2655 seq_puts(m, "Cache Policy: ");
2656 switch(lparam->direct_io) {
2657 case CACHED_IO:
2658 seq_puts(m, "Cached IO\n\n");
2659 break;
2660 case DIRECT_IO:
2661 seq_puts(m, "Direct IO\n\n");
2662 break;
2663 }
2664 }
2665
2666 free_pci:
2667 dma_free_coherent(&pdev->dev, array_sz, disk_array,
2668 disk_array_dma_handle);
2669 free_inquiry:
2670 mega_free_inquiry(inquiry, dma_handle, pdev);
2671 free_pdev:
2672 free_local_pdev(pdev);
2673 return 0;
2674 }
2675
2676 /**
2677 * proc_show_rdrv_10()
2678 * @m: Synthetic file construction data
2679 * @v: File iterator
2680 *
2681 * Display real time information about the logical drives 0 through 9.
2682 */
2683 static int
2684 proc_show_rdrv_10(struct seq_file *m, void *v)
2685 {
2686 return proc_show_rdrv(m, m->private, 0, 9);
2687 }
2688
2689
2690 /**
2691 * proc_show_rdrv_20()
2692 * @m: Synthetic file construction data
2693 * @v: File iterator
2694 *
 2695 * Display real time information about the logical drives 10 through 19.
2696 */
2697 static int
2698 proc_show_rdrv_20(struct seq_file *m, void *v)
2699 {
2700 return proc_show_rdrv(m, m->private, 10, 19);
2701 }
2702
2703
2704 /**
2705 * proc_show_rdrv_30()
2706 * @m: Synthetic file construction data
2707 * @v: File iterator
2708 *
 2709 * Display real time information about the logical drives 20 through 29.
2710 */
2711 static int
2712 proc_show_rdrv_30(struct seq_file *m, void *v)
2713 {
2714 return proc_show_rdrv(m, m->private, 20, 29);
2715 }
2716
2717
2718 /**
2719 * proc_show_rdrv_40()
2720 * @m: Synthetic file construction data
2721 * @v: File iterator
2722 *
 2723 * Display real time information about the logical drives 30 through 39.
2724 */
2725 static int
2726 proc_show_rdrv_40(struct seq_file *m, void *v)
2727 {
2728 return proc_show_rdrv(m, m->private, 30, 39);
2729 }
2730
2731 /**
2732 * mega_create_proc_entry()
2733 * @index: index in soft state array
2734 * @parent: parent node for this /proc entry
2735 *
2736 * Creates /proc entries for our controllers.
2737 */
2738 static void
2739 mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2740 {
2741 adapter_t *adapter = hba_soft_state[index];
2742 struct proc_dir_entry *dir;
2743 u8 string[16];
2744
2745 sprintf(string, "hba%d", adapter->host->host_no);
2746 dir = proc_mkdir_data(string, 0, parent, adapter);
2747 if (!dir) {
2748 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
2749 return;
2750 }
2751
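 /*
 * Populate the per-HBA directory with read-only entries. Each file is
 * backed by one of the proc_show_* routines above, so reading it (for
 * example with cat) generates the report on demand.
 */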
2752 proc_create_single_data("config", S_IRUSR, dir,
2753 proc_show_config, adapter);
2754 proc_create_single_data("stat", S_IRUSR, dir,
2755 proc_show_stat, adapter);
2756 proc_create_single_data("mailbox", S_IRUSR, dir,
2757 proc_show_mbox, adapter);
2758 #if MEGA_HAVE_ENH_PROC
2759 proc_create_single_data("rebuild-rate", S_IRUSR, dir,
2760 proc_show_rebuild_rate, adapter);
2761 proc_create_single_data("battery-status", S_IRUSR, dir,
2762 proc_show_battery, adapter);
2763 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir,
2764 proc_show_pdrv_ch0, adapter);
2765 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir,
2766 proc_show_pdrv_ch1, adapter);
2767 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir,
2768 proc_show_pdrv_ch2, adapter);
2769 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir,
2770 proc_show_pdrv_ch3, adapter);
2771 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir,
2772 proc_show_rdrv_10, adapter);
2773 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir,
2774 proc_show_rdrv_20, adapter);
2775 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir,
2776 proc_show_rdrv_30, adapter);
2777 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir,
2778 proc_show_rdrv_40, adapter);
2779 #endif
2780 }
2781
2782 #else
2783 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2784 {
2785 }
2786 #endif
2787
2788
2789 /*
2790 * megaraid_biosparam()
2791 *
2792 * Return the disk geometry for a particular disk
2793 */
2794 static int
2795 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2796 sector_t capacity, int geom[])
2797 {
2798 adapter_t *adapter;
2799 int heads;
2800 int sectors;
2801 int cylinders;
2802
2803 /* Get pointer to host config structure */
2804 adapter = (adapter_t *)sdev->host->hostdata;
2805
2806 if (IS_RAID_CH(adapter, sdev->channel)) {
2807 /* Default heads (64) & sectors (32) */
2808 heads = 64;
2809 sectors = 32;
2810 cylinders = (ulong)capacity / (heads * sectors);
2811
2812 /*
2813 * Handle extended translation size for logical drives
2814 * > 1Gb
2815 */
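 /*
 * Worked example: a drive with 0x400000 (4194304) 512-byte sectors,
 * i.e. 2 GB, exceeds the 1 GB limit, so the reported geometry becomes
 * 255 heads, 63 sectors and 4194304 / (255 * 63) = 261 cylinders.
 */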
2816 if ((ulong)capacity >= 0x200000) {
2817 heads = 255;
2818 sectors = 63;
2819 cylinders = (ulong)capacity / (heads * sectors);
2820 }
2821
2822 /* return result */
2823 geom[0] = heads;
2824 geom[1] = sectors;
2825 geom[2] = cylinders;
2826 }
2827 else {
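 /* Prefer the geometry recorded in an existing partition table. */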
2828 if (scsi_partsize(bdev, capacity, geom))
2829 return 0;
2830
2831 dev_info(&adapter->dev->dev,
2832 "invalid partition on this disk on channel %d\n",
2833 sdev->channel);
2834
2835 /* Default heads (64) & sectors (32) */
2836 heads = 64;
2837 sectors = 32;
2838 cylinders = (ulong)capacity / (heads * sectors);
2839
2840 /* Handle extended translation size for logical drives > 1Gb */
2841 if ((ulong)capacity >= 0x200000) {
2842 heads = 255;
2843 sectors = 63;
2844 cylinders = (ulong)capacity / (heads * sectors);
2845 }
2846
2847 /* return result */
2848 geom[0] = heads;
2849 geom[1] = sectors;
2850 geom[2] = cylinders;
2851 }
2852
2853 return 0;
2854 }
2855
2856 /**
2857 * mega_init_scb()
2858 * @adapter: pointer to our soft state
2859 *
2860 * Allocate memory for the various pointers in the scb structures:
2861 * scatter-gather list pointer, passthru and extended passthru structure
2862 * pointers.
2863 */
2864 static int
2865 mega_init_scb(adapter_t *adapter)
2866 {
2867 scb_t *scb;
2868 int i;
2869
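 /*
 * First pass: clear every per-scb buffer pointer so that mega_free_sgl()
 * can be called safely if any allocation in the second pass fails.
 */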
2870 for( i = 0; i < adapter->max_cmds; i++ ) {
2871
2872 scb = &adapter->scb_list[i];
2873
2874 scb->sgl64 = NULL;
2875 scb->sgl = NULL;
2876 scb->pthru = NULL;
2877 scb->epthru = NULL;
2878 }
2879
2880 for( i = 0; i < adapter->max_cmds; i++ ) {
2881
2882 scb = &adapter->scb_list[i];
2883
2884 scb->idx = i;
2885
2886 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
2887 sizeof(mega_sgl64) * adapter->sglen,
2888 &scb->sgl_dma_addr, GFP_KERNEL);
2889
2890 scb->sgl = (mega_sglist *)scb->sgl64;
2891
2892 if( !scb->sgl ) {
2893 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
2894 mega_free_sgl(adapter);
2895 return -1;
2896 }
2897
2898 scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
2899 sizeof(mega_passthru),
2900 &scb->pthru_dma_addr, GFP_KERNEL);
2901
2902 if( !scb->pthru ) {
2903 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
2904 mega_free_sgl(adapter);
2905 return -1;
2906 }
2907
2908 scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
2909 sizeof(mega_ext_passthru),
2910 &scb->epthru_dma_addr, GFP_KERNEL);
2911
2912 if( !scb->epthru ) {
2913 dev_warn(&adapter->dev->dev,
2914 "Can't allocate extended passthru\n");
2915 mega_free_sgl(adapter);
2916 return -1;
2917 }
2918
2919
2920 scb->dma_type = MEGA_DMA_TYPE_NONE;
2921
2922 /*
2923 * Link to free list
 2924 * lock not required since we are loading the driver, so no
 2925 * commands are possible right now.
2926 */
2927 scb->state = SCB_FREE;
2928 scb->cmd = NULL;
2929 list_add(&scb->list, &adapter->free_list);
2930 }
2931
2932 return 0;
2933 }
2934
2935
2936 /**
2937 * megadev_open()
2938 * @inode: unused
2939 * @filep: unused
2940 *
2941 * Routines for the character/ioctl interface to the driver. Find out if this
2942 * is a valid open.
2943 */
2944 static int
2945 megadev_open (struct inode *inode, struct file *filep)
2946 {
2947 /*
2948 * Only allow superuser to access private ioctl interface
2949 */
2950 if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
2951
2952 return 0;
2953 }
2954
2955
2956 /**
2957 * megadev_ioctl()
2958 * @filep: Our device file
2959 * @cmd: ioctl command
2960 * @arg: user buffer
2961 *
2962 * ioctl entry point for our private ioctl interface. We move the data in from
2963 * the user space, prepare the command (if necessary, convert the old MIMD
2964 * ioctl to new ioctl command), and issue a synchronous command to the
2965 * controller.
2966 */
2967 static int
2968 megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2969 {
2970 adapter_t *adapter;
2971 nitioctl_t uioc;
2972 int adapno;
2973 int rval;
2974 mega_passthru __user *upthru; /* user address for passthru */
2975 mega_passthru *pthru; /* copy user passthru here */
2976 dma_addr_t pthru_dma_hndl;
2977 void *data = NULL; /* data to be transferred */
2978 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
2979 megacmd_t mc;
2980 #if MEGA_HAVE_STATS
2981 megastat_t __user *ustats = NULL;
2982 int num_ldrv = 0;
2983 #endif
2984 u32 uxferaddr = 0;
2985 struct pci_dev *pdev;
2986
2987 /*
 2988 * Make sure only USCSICMD is issued through this interface.
 2989 * MIMD applications may still fire different commands.
2990 */
2991 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
2992 return -EINVAL;
2993 }
2994
2995 /*
2996 * Check and convert a possible MIMD command to NIT command.
2997 * mega_m_to_n() copies the data from the user space, so we do not
2998 * have to do it here.
 2999 * NOTE: We will need some user address to copy out the data, therefore
 3000 * the interface layer will also provide us with the required user
3001 * addresses.
3002 */
3003 memset(&uioc, 0, sizeof(nitioctl_t));
3004 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
3005 return rval;
3006
3007
3008 switch( uioc.opcode ) {
3009
3010 case GET_DRIVER_VER:
3011 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
3012 return (-EFAULT);
3013
3014 break;
3015
3016 case GET_N_ADAP:
3017 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
3018 return (-EFAULT);
3019
3020 /*
3021 * Shucks. MIMD interface returns a positive value for number
3022 * of adapters. TODO: Change it to return 0 when there is no
 3023 * application using the mimd interface.
3024 */
3025 return hba_count;
3026
3027 case GET_ADAP_INFO:
3028
3029 /*
3030 * Which adapter
3031 */
3032 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3033 return (-ENODEV);
3034
3035 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
3036 sizeof(struct mcontroller)) )
3037 return (-EFAULT);
3038 break;
3039
3040 #if MEGA_HAVE_STATS
3041
3042 case GET_STATS:
3043 /*
3044 * Which adapter
3045 */
3046 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3047 return (-ENODEV);
3048
3049 adapter = hba_soft_state[adapno];
3050
3051 ustats = uioc.uioc_uaddr;
3052
3053 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
3054 return (-EFAULT);
3055
3056 /*
3057 * Check for the validity of the logical drive number
3058 */
3059 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
3060
3061 if( copy_to_user(ustats->nreads, adapter->nreads,
3062 num_ldrv*sizeof(u32)) )
3063 return -EFAULT;
3064
3065 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
3066 num_ldrv*sizeof(u32)) )
3067 return -EFAULT;
3068
3069 if( copy_to_user(ustats->nwrites, adapter->nwrites,
3070 num_ldrv*sizeof(u32)) )
3071 return -EFAULT;
3072
3073 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
3074 num_ldrv*sizeof(u32)) )
3075 return -EFAULT;
3076
3077 if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
3078 num_ldrv*sizeof(u32)) )
3079 return -EFAULT;
3080
3081 if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
3082 num_ldrv*sizeof(u32)) )
3083 return -EFAULT;
3084
3085 return 0;
3086
3087 #endif
3088 case MBOX_CMD:
3089
3090 /*
3091 * Which adapter
3092 */
3093 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3094 return (-ENODEV);
3095
3096 adapter = hba_soft_state[adapno];
3097
3098 /*
3099 * Deletion of logical drive is a special case. The adapter
3100 * should be quiescent before this command is issued.
3101 */
3102 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
3103 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
3104
3105 /*
3106 * Do we support this feature
3107 */
3108 if( !adapter->support_random_del ) {
3109 dev_warn(&adapter->dev->dev, "logdrv "
3110 "delete on non-supporting F/W\n");
3111
3112 return (-EINVAL);
3113 }
3114
3115 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
3116
3117 if( rval == 0 ) {
3118 memset(&mc, 0, sizeof(megacmd_t));
3119
3120 mc.status = rval;
3121
3122 rval = mega_n_to_m((void __user *)arg, &mc);
3123 }
3124
3125 return rval;
3126 }
3127 /*
 3128 * This interface only supports the regular passthru commands.
3129 * Reject extended passthru and 64-bit passthru
3130 */
3131 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
3132 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
3133
3134 dev_warn(&adapter->dev->dev, "rejected passthru\n");
3135
3136 return (-EINVAL);
3137 }
3138
3139 /*
3140 * For all internal commands, the buffer must be allocated in
3141 * <4GB address range
3142 */
3143 if( make_local_pdev(adapter, &pdev) != 0 )
3144 return -EIO;
3145
3146 /* Is it a passthru command or a DCMD */
3147 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
3148 /* Passthru commands */
3149
3150 pthru = dma_alloc_coherent(&pdev->dev,
3151 sizeof(mega_passthru),
3152 &pthru_dma_hndl, GFP_KERNEL);
3153
3154 if( pthru == NULL ) {
3155 free_local_pdev(pdev);
3156 return (-ENOMEM);
3157 }
3158
3159 /*
3160 * The user passthru structure
3161 */
3162 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3163
3164 /*
3165 * Copy in the user passthru here.
3166 */
3167 if( copy_from_user(pthru, upthru,
3168 sizeof(mega_passthru)) ) {
3169
3170 dma_free_coherent(&pdev->dev,
3171 sizeof(mega_passthru),
3172 pthru, pthru_dma_hndl);
3173
3174 free_local_pdev(pdev);
3175
3176 return (-EFAULT);
3177 }
3178
3179 /*
3180 * Is there a data transfer
3181 */
3182 if( pthru->dataxferlen ) {
3183 data = dma_alloc_coherent(&pdev->dev,
3184 pthru->dataxferlen,
3185 &data_dma_hndl,
3186 GFP_KERNEL);
3187
3188 if( data == NULL ) {
3189 dma_free_coherent(&pdev->dev,
3190 sizeof(mega_passthru),
3191 pthru,
3192 pthru_dma_hndl);
3193
3194 free_local_pdev(pdev);
3195
3196 return (-ENOMEM);
3197 }
3198
3199 /*
3200 * Save the user address and point the kernel
3201 * address at just allocated memory
3202 */
3203 uxferaddr = pthru->dataxferaddr;
3204 pthru->dataxferaddr = data_dma_hndl;
3205 }
3206
3207
3208 /*
3209 * Is data coming down-stream
3210 */
3211 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
3212 /*
3213 * Get the user data
3214 */
3215 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3216 pthru->dataxferlen) ) {
3217 rval = (-EFAULT);
3218 goto freemem_and_return;
3219 }
3220 }
3221
3222 memset(&mc, 0, sizeof(megacmd_t));
3223
3224 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
3225 mc.xferaddr = (u32)pthru_dma_hndl;
3226
3227 /*
3228 * Issue the command
3229 */
3230 mega_internal_command(adapter, &mc, pthru);
3231
3232 rval = mega_n_to_m((void __user *)arg, &mc);
3233
3234 if( rval ) goto freemem_and_return;
3235
3236
3237 /*
3238 * Is data going up-stream
3239 */
3240 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3241 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3242 pthru->dataxferlen) ) {
3243 rval = (-EFAULT);
3244 }
3245 }
3246
3247 /*
3248 * Send the request sense data also, irrespective of
3249 * whether the user has asked for it or not.
3250 */
3251 if (copy_to_user(upthru->reqsensearea,
3252 pthru->reqsensearea, 14))
3253 rval = -EFAULT;
3254
3255 freemem_and_return:
3256 if( pthru->dataxferlen ) {
3257 dma_free_coherent(&pdev->dev,
3258 pthru->dataxferlen, data,
3259 data_dma_hndl);
3260 }
3261
3262 dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
3263 pthru, pthru_dma_hndl);
3264
3265 free_local_pdev(pdev);
3266
3267 return rval;
3268 }
3269 else {
3270 /* DCMD commands */
3271
3272 /*
3273 * Is there a data transfer
3274 */
3275 if( uioc.xferlen ) {
3276 data = dma_alloc_coherent(&pdev->dev,
3277 uioc.xferlen,
3278 &data_dma_hndl,
3279 GFP_KERNEL);
3280
3281 if( data == NULL ) {
3282 free_local_pdev(pdev);
3283 return (-ENOMEM);
3284 }
3285
3286 uxferaddr = MBOX(uioc)->xferaddr;
3287 }
3288
3289 /*
3290 * Is data coming down-stream
3291 */
3292 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
3293 /*
3294 * Get the user data
3295 */
3296 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3297 uioc.xferlen) ) {
3298
3299 dma_free_coherent(&pdev->dev,
3300 uioc.xferlen, data,
3301 data_dma_hndl);
3302
3303 free_local_pdev(pdev);
3304
3305 return (-EFAULT);
3306 }
3307 }
3308
3309 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
3310
3311 mc.xferaddr = (u32)data_dma_hndl;
3312
3313 /*
3314 * Issue the command
3315 */
3316 mega_internal_command(adapter, &mc, NULL);
3317
3318 rval = mega_n_to_m((void __user *)arg, &mc);
3319
3320 if( rval ) {
3321 if( uioc.xferlen ) {
3322 dma_free_coherent(&pdev->dev,
3323 uioc.xferlen, data,
3324 data_dma_hndl);
3325 }
3326
3327 free_local_pdev(pdev);
3328
3329 return rval;
3330 }
3331
3332 /*
3333 * Is data going up-stream
3334 */
3335 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3336 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3337 uioc.xferlen) ) {
3338
3339 rval = (-EFAULT);
3340 }
3341 }
3342
3343 if( uioc.xferlen ) {
3344 dma_free_coherent(&pdev->dev, uioc.xferlen,
3345 data, data_dma_hndl);
3346 }
3347
3348 free_local_pdev(pdev);
3349
3350 return rval;
3351 }
3352
3353 default:
3354 return (-EINVAL);
3355 }
3356
3357 return 0;
3358 }
3359
3360 static long
3361 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3362 {
3363 int ret;
3364
3365 mutex_lock(&megadev_mutex);
3366 ret = megadev_ioctl(filep, cmd, arg);
3367 mutex_unlock(&megadev_mutex);
3368
3369 return ret;
3370 }
3371
3372 /**
3373 * mega_m_to_n()
3374 * @arg: user address
3375 * @uioc: new ioctl structure
3376 *
3377 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl
3378 * structure
3379 *
3380 * Converts the older mimd ioctl structure to newer NIT structure
3381 */
3382 static int
3383 mega_m_to_n(void __user *arg, nitioctl_t *uioc)
3384 {
3385 struct uioctl_t uioc_mimd;
3386 char signature[8] = {0};
3387 u8 opcode;
3388 u8 subopcode;
3389
3390
3391 /*
 3392 * Check if the application conforms to NIT. We do not have to do much
3393 * in that case.
3394 * We exploit the fact that the signature is stored in the very
3395 * beginning of the structure.
3396 */
3397
3398 if( copy_from_user(signature, arg, 7) )
3399 return (-EFAULT);
3400
3401 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3402
3403 /*
 3404 * NOTE NOTE: The NIT ioctl is still in flux because of the
 3405 * change of mailbox definition, in HPE. No applications yet
 3406 * use this interface and let's not have applications use this
 3407 * interface till the new specifications are in place.
3408 */
3409 return -EINVAL;
3410 #if 0
3411 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
3412 return (-EFAULT);
3413 return 0;
3414 #endif
3415 }
3416
3417 /*
3418 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
3419 *
3420 * Get the user ioctl structure
3421 */
3422 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
3423 return (-EFAULT);
3424
3425
3426 /*
3427 * Get the opcode and subopcode for the commands
3428 */
3429 opcode = uioc_mimd.ui.fcs.opcode;
3430 subopcode = uioc_mimd.ui.fcs.subopcode;
3431
3432 switch (opcode) {
3433 case 0x82:
3434
3435 switch (subopcode) {
3436
3437 case MEGAIOC_QDRVRVER: /* Query driver version */
3438 uioc->opcode = GET_DRIVER_VER;
3439 uioc->uioc_uaddr = uioc_mimd.data;
3440 break;
3441
3442 case MEGAIOC_QNADAP: /* Get # of adapters */
3443 uioc->opcode = GET_N_ADAP;
3444 uioc->uioc_uaddr = uioc_mimd.data;
3445 break;
3446
3447 case MEGAIOC_QADAPINFO: /* Get adapter information */
3448 uioc->opcode = GET_ADAP_INFO;
3449 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3450 uioc->uioc_uaddr = uioc_mimd.data;
3451 break;
3452
3453 default:
3454 return(-EINVAL);
3455 }
3456
3457 break;
3458
3459
3460 case 0x81:
3461
3462 uioc->opcode = MBOX_CMD;
3463 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3464
3465 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3466
3467 uioc->xferlen = uioc_mimd.ui.fcs.length;
3468
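 /*
 * outlen means the application expects data back (UIOC_RD, up-stream);
 * inlen means the application supplies data (UIOC_WR, down-stream).
 */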
3469 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3470 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3471
3472 break;
3473
3474 case 0x80:
3475
3476 uioc->opcode = MBOX_CMD;
3477 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3478
3479 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3480
3481 /*
3482 * Choose the xferlen bigger of input and output data
3483 */
3484 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
3485 uioc_mimd.outlen : uioc_mimd.inlen;
3486
3487 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3488 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3489
3490 break;
3491
3492 default:
3493 return (-EINVAL);
3494
3495 }
3496
3497 return 0;
3498 }
3499
3500 /*
3501 * mega_n_to_m()
3502 * @arg: user address
3503 * @mc: mailbox command
3504 *
 3505 * Updates the status information to the application, depending on whether the
 3506 * application conforms to the older mimd or the newer NIT ioctl interface.
3507 */
3508 static int
3509 mega_n_to_m(void __user *arg, megacmd_t *mc)
3510 {
3511 nitioctl_t __user *uiocp;
3512 megacmd_t __user *umc;
3513 mega_passthru __user *upthru;
3514 struct uioctl_t __user *uioc_mimd;
3515 char signature[8] = {0};
3516
3517 /*
 3518 * Check if the application conforms to NIT.
3519 */
3520 if( copy_from_user(signature, arg, 7) )
3521 return -EFAULT;
3522
3523 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3524
3525 uiocp = arg;
3526
3527 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
3528 return (-EFAULT);
3529
3530 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3531
3532 umc = MBOX_P(uiocp);
3533
3534 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3535 return -EFAULT;
3536
3537 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
3538 return (-EFAULT);
3539 }
3540 }
3541 else {
3542 uioc_mimd = arg;
3543
3544 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
3545 return (-EFAULT);
3546
3547 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3548
3549 umc = (megacmd_t __user *)uioc_mimd->mbox;
3550
3551 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3552 return (-EFAULT);
3553
3554 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
3555 return (-EFAULT);
3556 }
3557 }
3558
3559 return 0;
3560 }
3561
3562
3563 /*
3564 * MEGARAID 'FW' commands.
3565 */
3566
3567 /**
3568 * mega_is_bios_enabled()
3569 * @adapter: pointer to our soft state
3570 *
3571 * issue command to find out if the BIOS is enabled for this controller
3572 */
3573 static int
3574 mega_is_bios_enabled(adapter_t *adapter)
3575 {
3576 unsigned char raw_mbox[sizeof(struct mbox_out)];
3577 mbox_t *mbox;
3578
3579 mbox = (mbox_t *)raw_mbox;
3580
3581 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3582
3583 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3584
3585 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3586
3587 raw_mbox[0] = IS_BIOS_ENABLED;
3588 raw_mbox[2] = GET_BIOS;
3589
3590 issue_scb_block(adapter, raw_mbox);
3591
3592 return *(char *)adapter->mega_buffer;
3593 }
3594
3595
3596 /**
3597 * mega_enum_raid_scsi()
3598 * @adapter: pointer to our soft state
3599 *
3600 * Find out what channels are RAID/SCSI. This information is used to
3601 * differentiate the virtual channels and physical channels and to support
 3602 * the ROMB feature and non-disk devices.
3603 */
3604 static void
3605 mega_enum_raid_scsi(adapter_t *adapter)
3606 {
3607 unsigned char raw_mbox[sizeof(struct mbox_out)];
3608 mbox_t *mbox;
3609 int i;
3610
3611 mbox = (mbox_t *)raw_mbox;
3612
3613 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3614
3615 /*
3616 * issue command to find out what channels are raid/scsi
3617 */
3618 raw_mbox[0] = CHNL_CLASS;
3619 raw_mbox[2] = GET_CHNL_CLASS;
3620
3621 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3622
3623 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3624
3625 /*
 3626 * Non-ROMB firmware fails this command, so all channels
 3627 * must be shown as RAID
3628 */
3629 adapter->mega_ch_class = 0xFF;
3630
3631 if(!issue_scb_block(adapter, raw_mbox)) {
3632 adapter->mega_ch_class = *((char *)adapter->mega_buffer);
3633
3634 }
3635
3636 for( i = 0; i < adapter->product_info.nchannels; i++ ) {
3637 if( (adapter->mega_ch_class >> i) & 0x01 ) {
3638 dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
3639 i);
3640 }
3641 else {
3642 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
3643 i);
3644 }
3645 }
3646
3647 return;
3648 }
3649
3650
3651 /**
3652 * mega_get_boot_drv()
3653 * @adapter: pointer to our soft state
3654 *
3655 * Find out which device is the boot device. Note, any logical drive or any
 3656 * physical device (e.g., a CDROM) can be designated as a boot device.
3657 */
3658 static void
3659 mega_get_boot_drv(adapter_t *adapter)
3660 {
3661 struct private_bios_data *prv_bios_data;
3662 unsigned char raw_mbox[sizeof(struct mbox_out)];
3663 mbox_t *mbox;
3664 u16 cksum = 0;
3665 u8 *cksum_p;
3666 u8 boot_pdrv;
3667 int i;
3668
3669 mbox = (mbox_t *)raw_mbox;
3670
3671 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3672
3673 raw_mbox[0] = BIOS_PVT_DATA;
3674 raw_mbox[2] = GET_BIOS_PVT_DATA;
3675
3676 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3677
3678 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3679
3680 adapter->boot_ldrv_enabled = 0;
3681 adapter->boot_ldrv = 0;
3682
3683 adapter->boot_pdrv_enabled = 0;
3684 adapter->boot_pdrv_ch = 0;
3685 adapter->boot_pdrv_tgt = 0;
3686
3687 if(issue_scb_block(adapter, raw_mbox) == 0) {
3688 prv_bios_data =
3689 (struct private_bios_data *)adapter->mega_buffer;
3690
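 /*
 * Validate the private BIOS data: the stored cksum field holds the two's
 * complement of the sum of the first 14 bytes, so the structure is only
 * trusted when cksum == (u16)(0 - sum).
 */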
3691 cksum = 0;
3692 cksum_p = (char *)prv_bios_data;
3693 for (i = 0; i < 14; i++ ) {
3694 cksum += (u16)(*cksum_p++);
3695 }
3696
3697 if (prv_bios_data->cksum == (u16)(0-cksum) ) {
3698
3699 /*
3700 * If MSB is set, a physical drive is set as boot
3701 * device
3702 */
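 /* The low 7 bits encode the boot device as channel * 16 + target. */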
3703 if( prv_bios_data->boot_drv & 0x80 ) {
3704 adapter->boot_pdrv_enabled = 1;
3705 boot_pdrv = prv_bios_data->boot_drv & 0x7F;
3706 adapter->boot_pdrv_ch = boot_pdrv / 16;
3707 adapter->boot_pdrv_tgt = boot_pdrv % 16;
3708 }
3709 else {
3710 adapter->boot_ldrv_enabled = 1;
3711 adapter->boot_ldrv = prv_bios_data->boot_drv;
3712 }
3713 }
3714 }
3715
3716 }
3717
3718 /**
3719 * mega_support_random_del()
3720 * @adapter: pointer to our soft state
3721 *
3722 * Find out if this controller supports random deletion and addition of
3723 * logical drives
3724 */
3725 static int
3726 mega_support_random_del(adapter_t *adapter)
3727 {
3728 unsigned char raw_mbox[sizeof(struct mbox_out)];
3729 mbox_t *mbox;
3730 int rval;
3731
3732 mbox = (mbox_t *)raw_mbox;
3733
3734 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3735
3736 /*
3737 * issue command
3738 */
3739 raw_mbox[0] = FC_DEL_LOGDRV;
3740 raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3741
3742 rval = issue_scb_block(adapter, raw_mbox);
3743
3744 return !rval;
3745 }
3746
3747
3748 /**
3749 * mega_support_ext_cdb()
3750 * @adapter: pointer to our soft state
3751 *
 3752 * Find out if this firmware supports cdblen > 10
3753 */
3754 static int
3755 mega_support_ext_cdb(adapter_t *adapter)
3756 {
3757 unsigned char raw_mbox[sizeof(struct mbox_out)];
3758 mbox_t *mbox;
3759 int rval;
3760
3761 mbox = (mbox_t *)raw_mbox;
3762
3763 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3764 /*
3765 * issue command to find out if controller supports extended CDBs.
3766 */
3767 raw_mbox[0] = 0xA4;
3768 raw_mbox[2] = 0x16;
3769
3770 rval = issue_scb_block(adapter, raw_mbox);
3771
3772 return !rval;
3773 }
3774
3775
3776 /**
3777 * mega_del_logdrv()
3778 * @adapter: pointer to our soft state
3779 * @logdrv: logical drive to be deleted
3780 *
3781 * Delete the specified logical drive. It is the responsibility of the user
3782 * app to let the OS know about this operation.
3783 */
3784 static int
3785 mega_del_logdrv(adapter_t *adapter, int logdrv)
3786 {
3787 unsigned long flags;
3788 scb_t *scb;
3789 int rval;
3790
3791 /*
3792 * Stop sending commands to the controller, queue them internally.
3793 * When deletion is complete, ISR will flush the queue.
3794 */
3795 atomic_set(&adapter->quiescent, 1);
3796
3797 /*
3798 * Wait till all the issued commands are complete and there are no
3799 * commands in the pending queue
3800 */
3801 while (atomic_read(&adapter->pend_cmds) > 0 ||
3802 !list_empty(&adapter->pending_list))
3803 msleep(1000); /* sleep for 1s */
3804
3805 rval = mega_do_del_logdrv(adapter, logdrv);
3806
3807 spin_lock_irqsave(&adapter->lock, flags);
3808
3809 /*
3810 * If delete operation was successful, add 0x80 to the logical drive
3811 * ids for commands in the pending queue.
3812 */
3813 if (adapter->read_ldidmap) {
3814 struct list_head *pos;
3815 list_for_each(pos, &adapter->pending_list) {
3816 scb = list_entry(pos, scb_t, list);
3817 if (scb->pthru->logdrv < 0x80 )
3818 scb->pthru->logdrv += 0x80;
3819 }
3820 }
3821
3822 atomic_set(&adapter->quiescent, 0);
3823
3824 mega_runpendq(adapter);
3825
3826 spin_unlock_irqrestore(&adapter->lock, flags);
3827
3828 return rval;
3829 }
3830
3831
3832 static int
3833 mega_do_del_logdrv(adapter_t *adapter, int logdrv)
3834 {
3835 megacmd_t mc;
3836 int rval;
3837
3838 memset( &mc, 0, sizeof(megacmd_t));
3839
3840 mc.cmd = FC_DEL_LOGDRV;
3841 mc.opcode = OP_DEL_LOGDRV;
3842 mc.subopcode = logdrv;
3843
3844 rval = mega_internal_command(adapter, &mc, NULL);
3845
3846 /* log this event */
3847 if(rval) {
 3848 dev_warn(&adapter->dev->dev, "Delete LD-%d failed\n", logdrv);
3849 return rval;
3850 }
3851
3852 /*
 3853 * After deleting the first logical drive, the logical drives must be
3854 * addressed by adding 0x80 to the logical drive id.
3855 */
3856 adapter->read_ldidmap = 1;
3857
3858 return rval;
3859 }
3860
3861
3862 /**
3863 * mega_get_max_sgl()
3864 * @adapter: pointer to our soft state
3865 *
3866 * Find out the maximum number of scatter-gather elements supported by this
3867 * version of the firmware
3868 */
3869 static void
3870 mega_get_max_sgl(adapter_t *adapter)
3871 {
3872 unsigned char raw_mbox[sizeof(struct mbox_out)];
3873 mbox_t *mbox;
3874
3875 mbox = (mbox_t *)raw_mbox;
3876
3877 memset(mbox, 0, sizeof(raw_mbox));
3878
3879 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3880
3881 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3882
3883 raw_mbox[0] = MAIN_MISC_OPCODE;
3884 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3885
3886
3887 if( issue_scb_block(adapter, raw_mbox) ) {
3888 /*
3889 * f/w does not support this command. Choose the default value
3890 */
3891 adapter->sglen = MIN_SGLIST;
3892 }
3893 else {
3894 adapter->sglen = *((char *)adapter->mega_buffer);
3895
3896 /*
3897 * Make sure this is not more than the resources we are
3898 * planning to allocate
3899 */
3900 if ( adapter->sglen > MAX_SGLIST )
3901 adapter->sglen = MAX_SGLIST;
3902 }
3903
3904 return;
3905 }
3906
3907
3908 /**
3909 * mega_support_cluster()
3910 * @adapter: pointer to our soft state
3911 *
 3912 * Find out if this firmware supports cluster calls.
3913 */
3914 static int
3915 mega_support_cluster(adapter_t *adapter)
3916 {
3917 unsigned char raw_mbox[sizeof(struct mbox_out)];
3918 mbox_t *mbox;
3919
3920 mbox = (mbox_t *)raw_mbox;
3921
3922 memset(mbox, 0, sizeof(raw_mbox));
3923
3924 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3925
3926 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3927
3928 /*
3929 * Try to get the initiator id. This command will succeed iff the
3930 * clustering is available on this HBA.
3931 */
3932 raw_mbox[0] = MEGA_GET_TARGET_ID;
3933
3934 if( issue_scb_block(adapter, raw_mbox) == 0 ) {
3935
3936 /*
3937 * Cluster support available. Get the initiator target id.
3938 * Tell our id to mid-layer too.
3939 */
3940 adapter->this_id = *(u32 *)adapter->mega_buffer;
3941 adapter->host->this_id = adapter->this_id;
3942
3943 return 1;
3944 }
3945
3946 return 0;
3947 }
3948
3949 #ifdef CONFIG_PROC_FS
3950 /**
3951 * mega_adapinq()
3952 * @adapter: pointer to our soft state
3953 * @dma_handle: DMA address of the buffer
3954 *
3955 * Issue internal commands while interrupts are available.
 3956 * We only issue direct mailbox commands from within the driver. The ioctl()
 3957 * interface, which uses these routines, can also issue passthru commands.
3958 */
3959 static int
3960 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
3961 {
3962 megacmd_t mc;
3963
3964 memset(&mc, 0, sizeof(megacmd_t));
3965
3966 if( adapter->flag & BOARD_40LD ) {
3967 mc.cmd = FC_NEW_CONFIG;
3968 mc.opcode = NC_SUBOP_ENQUIRY3;
3969 mc.subopcode = ENQ3_GET_SOLICITED_FULL;
3970 }
3971 else {
3972 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
3973 }
3974
3975 mc.xferaddr = (u32)dma_handle;
3976
3977 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
3978 return -1;
3979 }
3980
3981 return 0;
3982 }
3983
3984
3985 /**
3986 * mega_internal_dev_inquiry()
3987 * @adapter: pointer to our soft state
3988 * @ch: channel for this device
3989 * @tgt: ID of this device
3990 * @buf_dma_handle: DMA address of the buffer
3991 *
3992 * Issue the scsi inquiry for the specified device.
3993 */
3994 static int
3995 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
3996 dma_addr_t buf_dma_handle)
3997 {
3998 mega_passthru *pthru;
3999 dma_addr_t pthru_dma_handle;
4000 megacmd_t mc;
4001 int rval;
4002 struct pci_dev *pdev;
4003
4004
4005 /*
4006 * For all internal commands, the buffer must be allocated in <4GB
4007 * address range
4008 */
4009 if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
4010
4011 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
4012 &pthru_dma_handle, GFP_KERNEL);
4013
4014 if( pthru == NULL ) {
4015 free_local_pdev(pdev);
4016 return -1;
4017 }
4018
4019 pthru->timeout = 2;
4020 pthru->ars = 1;
4021 pthru->reqsenselen = 14;
4022 pthru->islogical = 0;
4023
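 /*
 * 40LD firmware folds the channel into the target field as
 * (channel << 4) | target and expects the channel field to be 0;
 * non-40LD firmware takes the channel and target separately.
 */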
4024 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
4025
4026 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
4027
4028 pthru->cdblen = 6;
4029
4030 pthru->cdb[0] = INQUIRY;
4031 pthru->cdb[1] = 0;
4032 pthru->cdb[2] = 0;
4033 pthru->cdb[3] = 0;
4034 pthru->cdb[4] = 255;
4035 pthru->cdb[5] = 0;
4036
4037
4038 pthru->dataxferaddr = (u32)buf_dma_handle;
4039 pthru->dataxferlen = 256;
4040
4041 memset(&mc, 0, sizeof(megacmd_t));
4042
4043 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
4044 mc.xferaddr = (u32)pthru_dma_handle;
4045
4046 rval = mega_internal_command(adapter, &mc, pthru);
4047
4048 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
4049 pthru_dma_handle);
4050
4051 free_local_pdev(pdev);
4052
4053 return rval;
4054 }
4055 #endif
4056
4057 /**
4058 * mega_internal_command()
4059 * @adapter: pointer to our soft state
4060 * @mc: the mailbox command
4061 * @pthru: Passthru structure for DCDB commands
4062 *
4063 * Issue the internal commands in interrupt mode.
4064 * The last argument is the address of the passthru structure if the command
4065 * to be fired is a passthru command
4066 *
4067 * Note: parameter 'pthru' is null for non-passthru commands.
4068 */
4069 static int
4070 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4071 {
4072 unsigned long flags;
4073 scb_t *scb;
4074 int rval;
4075
4076 /*
4077 * The internal commands share one command id and hence are
 4078 * serialized. This is so because we want to reserve the maximum number
 4079 * of available command ids for the I/O commands.
4080 */
4081 mutex_lock(&adapter->int_mtx);
4082
4083 scb = &adapter->int_scb;
4084 memset(scb, 0, sizeof(scb_t));
4085
4086 scb->idx = CMDID_INT_CMDS;
4087 scb->state |= SCB_ACTIVE | SCB_PENDQ;
4088
4089 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
4090
4091 /*
4092 * Is it a passthru command
4093 */
4094 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
4095 scb->pthru = pthru;
4096
4097 spin_lock_irqsave(&adapter->lock, flags);
4098 list_add_tail(&scb->list, &adapter->pending_list);
4099 /*
4100 * Check if the HBA is in quiescent state, e.g., during a
 4101 * delete logical drive operation. If it is, don't run
4102 * the pending_list.
4103 */
4104 if (atomic_read(&adapter->quiescent) == 0)
4105 mega_runpendq(adapter);
4106 spin_unlock_irqrestore(&adapter->lock, flags);
4107
4108 wait_for_completion(&adapter->int_waitq);
4109
4110 mc->status = rval = adapter->int_status;
4111
4112 /*
4113 * Print a debug message for all failed commands. Applications can use
4114 * this information.
4115 */
4116 if (rval && trace_level) {
4117 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
4118 mc->cmd, mc->opcode, mc->subopcode, rval);
4119 }
4120
4121 mutex_unlock(&adapter->int_mtx);
4122 return rval;
4123 }
4124
4125 static struct scsi_host_template megaraid_template = {
4126 .module = THIS_MODULE,
4127 .name = "MegaRAID",
4128 .proc_name = "megaraid_legacy",
4129 .info = megaraid_info,
4130 .queuecommand = megaraid_queue,
4131 .bios_param = megaraid_biosparam,
4132 .max_sectors = MAX_SECTORS_PER_IO,
4133 .can_queue = MAX_COMMANDS,
4134 .this_id = DEFAULT_INITIATOR_ID,
4135 .sg_tablesize = MAX_SGLIST,
4136 .cmd_per_lun = DEF_CMD_PER_LUN,
4137 .eh_abort_handler = megaraid_abort,
4138 .eh_device_reset_handler = megaraid_reset,
4139 .eh_bus_reset_handler = megaraid_reset,
4140 .eh_host_reset_handler = megaraid_reset,
4141 .no_write_same = 1,
4142 };
4143
4144 static int
4145 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4146 {
4147 struct Scsi_Host *host;
4148 adapter_t *adapter;
4149 unsigned long mega_baseport, tbase, flag = 0;
4150 u16 subsysid, subsysvid;
4151 u8 pci_bus, pci_dev_func;
4152 int irq, i, j;
4153 int error = -ENODEV;
4154
4155 if (hba_count >= MAX_CONTROLLERS)
4156 goto out;
4157
4158 if (pci_enable_device(pdev))
4159 goto out;
4160 pci_set_master(pdev);
4161
4162 pci_bus = pdev->bus->number;
4163 pci_dev_func = pdev->devfn;
4164
4165 /*
4166 * The megaraid3 stuff reports the ID of the Intel part which is not
4167 * remotely specific to the megaraid
4168 */
4169 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
4170 u16 magic;
4171 /*
4172 * Don't fall over the Compaq management cards using the same
4173 * PCI identifier
4174 */
4175 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4176 pdev->subsystem_device == 0xC000)
4177 goto out_disable_device;
4178 /* Now check the magic signature byte */
4179 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4180 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4181 goto out_disable_device;
4182 /* Ok it is probably a megaraid */
4183 }
4184
4185 /*
4186 * For these vendor and device ids, signature offsets are not
4187 * valid and 64 bit is implicit
4188 */
4189 if (id->driver_data & BOARD_64BIT)
4190 flag |= BOARD_64BIT;
4191 else {
4192 u32 magic64;
4193
4194 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
4195 if (magic64 == HBA_SIGNATURE_64BIT)
4196 flag |= BOARD_64BIT;
4197 }
4198
4199 subsysvid = pdev->subsystem_vendor;
4200 subsysid = pdev->subsystem_device;
4201
4202 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
4203 id->vendor, id->device);
4204
4205 /* Read the base port and IRQ from PCI */
4206 mega_baseport = pci_resource_start(pdev, 0);
4207 irq = pdev->irq;
4208
4209 tbase = mega_baseport;
4210 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
4211 flag |= BOARD_MEMMAP;
4212
4213 if (!request_mem_region(mega_baseport, 128, "megaraid")) {
4214 dev_warn(&pdev->dev, "mem region busy!\n");
4215 goto out_disable_device;
4216 }
4217
4218 mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
4219 if (!mega_baseport) {
4220 dev_warn(&pdev->dev, "could not map hba memory\n");
4221 goto out_release_region;
4222 }
4223 } else {
4224 flag |= BOARD_IOMAP;
4225 mega_baseport += 0x10;
4226
4227 if (!request_region(mega_baseport, 16, "megaraid"))
4228 goto out_disable_device;
4229 }
4230
4231 /* Initialize SCSI Host structure */
4232 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
4233 if (!host)
4234 goto out_iounmap;
4235
4236 adapter = (adapter_t *)host->hostdata;
4237 memset(adapter, 0, sizeof(adapter_t));
4238
4239 dev_notice(&pdev->dev,
4240 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
4241 host->host_no, mega_baseport, irq);
4242
4243 adapter->base = mega_baseport;
4244 if (flag & BOARD_MEMMAP)
4245 adapter->mmio_base = (void __iomem *) mega_baseport;
4246
4247 INIT_LIST_HEAD(&adapter->free_list);
4248 INIT_LIST_HEAD(&adapter->pending_list);
4249 INIT_LIST_HEAD(&adapter->completed_list);
4250
4251 adapter->flag = flag;
4252 spin_lock_init(&adapter->lock);
4253
4254 host->cmd_per_lun = max_cmd_per_lun;
4255 host->max_sectors = max_sectors_per_io;
4256
4257 adapter->dev = pdev;
4258 adapter->host = host;
4259
4260 adapter->host->irq = irq;
4261
4262 if (flag & BOARD_MEMMAP)
4263 adapter->host->base = tbase;
4264 else {
4265 adapter->host->io_port = tbase;
4266 adapter->host->n_io_port = 16;
4267 }
4268
4269 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
4270
4271 /*
4272 * Allocate buffer to issue internal commands.
4273 */
4274 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
4275 MEGA_BUFFER_SIZE,
4276 &adapter->buf_dma_handle,
4277 GFP_KERNEL);
4278 if (!adapter->mega_buffer) {
4279 dev_warn(&pdev->dev, "out of RAM\n");
4280 goto out_host_put;
4281 }
4282
4283 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
4284 GFP_KERNEL);
4285 if (!adapter->scb_list) {
4286 dev_warn(&pdev->dev, "out of RAM\n");
4287 goto out_free_cmd_buffer;
4288 }
4289
4290 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
4291 megaraid_isr_memmapped : megaraid_isr_iomapped,
4292 IRQF_SHARED, "megaraid", adapter)) {
4293 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
4294 goto out_free_scb_list;
4295 }
4296
4297 if (mega_setup_mailbox(adapter))
4298 goto out_free_irq;
4299
4300 if (mega_query_adapter(adapter))
4301 goto out_free_mbox;
4302
4303 /*
 4304 * Check for some buggy f/w
4305 */
4306 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
4307 /*
4308 * Which firmware
4309 */
4310 if (!strcmp(adapter->fw_version, "3.00") ||
4311 !strcmp(adapter->fw_version, "3.01")) {
4312
4313 dev_warn(&pdev->dev,
4314 "Your card is a Dell PERC "
4315 "2/SC RAID controller with "
4316 "firmware\nmegaraid: 3.00 or 3.01. "
4317 "This driver is known to have "
4318 "corruption issues\nmegaraid: with "
4319 "those firmware versions on this "
4320 "specific card. In order\nmegaraid: "
4321 "to protect your data, please upgrade "
4322 "your firmware to version\nmegaraid: "
4323 "3.10 or later, available from the "
4324 "Dell Technical Support web\n"
4325 "megaraid: site at\nhttp://support."
4326 "dell.com/us/en/filelib/download/"
4327 "index.asp?fileid=2940\n"
4328 );
4329 }
4330 }
4331
4332 /*
4333 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with
4334 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit
4335 * support, since this firmware cannot handle 64 bit
4336 * addressing
4337 */
4338 if ((subsysvid == PCI_VENDOR_ID_HP) &&
4339 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
4340 /*
4341 * which firmware
4342 */
4343 if (!strcmp(adapter->fw_version, "H01.07") ||
4344 !strcmp(adapter->fw_version, "H01.08") ||
4345 !strcmp(adapter->fw_version, "H01.09") ) {
4346 dev_warn(&pdev->dev,
4347 "Firmware H.01.07, "
4348 "H.01.08, and H.01.09 on 1M/2M "
4349 "controllers\n"
4350 "do not support 64 bit "
4351 "addressing.\nDISABLING "
4352 "64 bit support.\n");
4353 adapter->flag &= ~BOARD_64BIT;
4354 }
4355 }
4356
4357 if (mega_is_bios_enabled(adapter))
4358 mega_hbas[hba_count].is_bios_enabled = 1;
4359 mega_hbas[hba_count].hostdata_addr = adapter;
4360
4361 /*
4362 * Find out which channel is raid and which is scsi. This is
4363 * for ROMB support.
4364 */
4365 mega_enum_raid_scsi(adapter);
4366
4367 /*
 4368 * Find out if a logical drive is set as the boot drive. If
 4369 * there is one, make it the first logical drive.
 4370 * ROMB: if we have to boot from a physical drive, all the
 4371 * physical drives appear before the logical disks.
 4372 * Otherwise, all the physical drives are exported to the mid
 4373 * layer after the logical drives.
4374 */
4375 mega_get_boot_drv(adapter);
4376
4377 if (adapter->boot_pdrv_enabled) {
4378 j = adapter->product_info.nchannels;
4379 for( i = 0; i < j; i++ )
4380 adapter->logdrv_chan[i] = 0;
4381 for( i = j; i < NVIRT_CHAN + j; i++ )
4382 adapter->logdrv_chan[i] = 1;
4383 } else {
4384 for (i = 0; i < NVIRT_CHAN; i++)
4385 adapter->logdrv_chan[i] = 1;
4386 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
4387 adapter->logdrv_chan[i] = 0;
4388 adapter->mega_ch_class <<= NVIRT_CHAN;
4389 }
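/*
 * logdrv_chan[] marks which channels are exported as virtual
 * (logical-drive) channels. When booting from a physical drive, the
 * real SCSI channels come first and the NVIRT_CHAN virtual channels
 * follow; otherwise the virtual channels come first and mega_ch_class
 * is shifted up to stay aligned with the physical channels.
 */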
4390
4391 /*
4392 * Does the firmware support random deletion and addition of
4393 * logical drives?
4394 */
4395 adapter->read_ldidmap = 0; /* set it after first logdrv
4396 delete cmd */
4397 adapter->support_random_del = mega_support_random_del(adapter);
4398
4399 /* Initialize SCBs */
4400 if (mega_init_scb(adapter))
4401 goto out_free_mbox;
4402
4403 /*
4404 * Reset the pending commands counter
4405 */
4406 atomic_set(&adapter->pend_cmds, 0);
4407
4408 /*
4409 * Reset the adapter quiescent flag
4410 */
4411 atomic_set(&adapter->quiescent, 0);
4412
4413 hba_soft_state[hba_count] = adapter;
4414
4415 /*
4416 * Fill in the structure which needs to be passed back to the
4417 * application when it does an ioctl() for controller related
4418 * information.
4419 */
4420 i = hba_count;
4421
4422 mcontroller[i].base = mega_baseport;
4423 mcontroller[i].irq = irq;
4424 mcontroller[i].numldrv = adapter->numldrv;
4425 mcontroller[i].pcibus = pci_bus;
4426 mcontroller[i].pcidev = id->device;
4427 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
4428 mcontroller[i].pciid = -1;
4429 mcontroller[i].pcivendor = id->vendor;
4430 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
4431 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
4432
4433
4434 /* Set the Mode of addressing to 64 bit if we can */
4435 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
4436 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4437 adapter->has_64bit_addr = 1;
4438 } else {
4439 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4440 adapter->has_64bit_addr = 0;
4441 }
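/*
 * 64-bit DMA addressing is used only when both the board advertises
 * it (BOARD_64BIT) and dma_addr_t is 8 bytes on this kernel;
 * otherwise the device is restricted to a 32-bit DMA mask.
 */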
4442
4443 mutex_init(&adapter->int_mtx);
4444 init_completion(&adapter->int_waitq);
4445
4446 adapter->this_id = DEFAULT_INITIATOR_ID;
4447 adapter->host->this_id = DEFAULT_INITIATOR_ID;
4448
4449 #if MEGA_HAVE_CLUSTERING
4450 /*
4451 * Is cluster support enabled on this controller?
4452 * Note: in a cluster the HBAs (the initiators) have different
4453 * target IDs and we cannot assume ID 7. The call to
4454 * mega_support_cluster() also retrieves the target IDs if
4455 * cluster support is available.
4456 */
4457 adapter->has_cluster = mega_support_cluster(adapter);
4458 if (adapter->has_cluster) {
4459 dev_notice(&pdev->dev,
4460 "Cluster driver, initiator id:%d\n",
4461 adapter->this_id);
4462 }
4463 #endif
4464
4465 pci_set_drvdata(pdev, host);
4466
4467 mega_create_proc_entry(hba_count, mega_proc_dir_entry);
4468
4469 error = scsi_add_host(host, &pdev->dev);
4470 if (error)
4471 goto out_free_mbox;
4472
4473 scsi_scan_host(host);
4474 hba_count++;
4475 return 0;
4476
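/*
 * Error unwind: each label below releases the resources acquired
 * before the corresponding failure point, in reverse order of
 * allocation.
 */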
4477 out_free_mbox:
4478 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4479 adapter->una_mbox64, adapter->una_mbox64_dma);
4480 out_free_irq:
4481 free_irq(adapter->host->irq, adapter);
4482 out_free_scb_list:
4483 kfree(adapter->scb_list);
4484 out_free_cmd_buffer:
4485 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4486 adapter->mega_buffer, adapter->buf_dma_handle);
4487 out_host_put:
4488 scsi_host_put(host);
4489 out_iounmap:
4490 if (flag & BOARD_MEMMAP)
4491 iounmap((void *)mega_baseport);
4492 out_release_region:
4493 if (flag & BOARD_MEMMAP)
4494 release_mem_region(tbase, 128);
4495 else
4496 release_region(mega_baseport, 16);
4497 out_disable_device:
4498 pci_disable_device(pdev);
4499 out:
4500 return error;
4501 }
4502
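/*
 * Release the IRQ, flush the adapter cache and then the disk caches
 * with blocking mailbox commands, and finally wait roughly ten
 * seconds for the flushes to reach the media.
 */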
4503 static void
4504 __megaraid_shutdown(adapter_t *adapter)
4505 {
4506 u_char raw_mbox[sizeof(struct mbox_out)];
4507 mbox_t *mbox = (mbox_t *)raw_mbox;
4508 int i;
4509
4510 /* Flush adapter cache */
4511 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4512 raw_mbox[0] = FLUSH_ADAPTER;
4513
4514 free_irq(adapter->host->irq, adapter);
4515
4516 /* Issue a blocking (interrupts disabled) command to the card */
4517 issue_scb_block(adapter, raw_mbox);
4518
4519 /* Flush disks cache */
4520 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4521 raw_mbox[0] = FLUSH_SYSTEM;
4522
4523 /* Issue a blocking (interrupts disabled) command to the card */
4524 issue_scb_block(adapter, raw_mbox);
4525
4526 if (atomic_read(&adapter->pend_cmds) > 0)
4527 dev_warn(&adapter->dev->dev, "pending commands!!\n");
4528
4529 /*
4530 * Wait deliberately to make sure all the caches are
4531 * actually flushed.
4532 */
4533 for (i = 0; i <= 10; i++)
4534 mdelay(1000);
4535 }
4536
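/*
 * Tear down a controller: detach it from the SCSI midlayer, flush its
 * caches via __megaraid_shutdown(), then release the MMIO/port region,
 * scatter-gather lists, /proc entries and DMA buffers allocated in the
 * probe routine before dropping the host reference and disabling the
 * PCI device.
 */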
4537 static void
4538 megaraid_remove_one(struct pci_dev *pdev)
4539 {
4540 struct Scsi_Host *host = pci_get_drvdata(pdev);
4541 adapter_t *adapter = (adapter_t *)host->hostdata;
4542 char buf[12] = { 0 };
4543
4544 scsi_remove_host(host);
4545
4546 __megaraid_shutdown(adapter);
4547
4548 /* Free our resources */
4549 if (adapter->flag & BOARD_MEMMAP) {
4550 iounmap((void *)adapter->base);
4551 release_mem_region(adapter->host->base, 128);
4552 } else
4553 release_region(adapter->base, 16);
4554
4555 mega_free_sgl(adapter);
4556
4557 sprintf(buf, "hba%d", adapter->host->host_no);
4558 remove_proc_subtree(buf, mega_proc_dir_entry);
4559
4560 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4561 adapter->mega_buffer, adapter->buf_dma_handle);
4562 kfree(adapter->scb_list);
4563 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4564 adapter->una_mbox64, adapter->una_mbox64_dma);
4565
4566 scsi_host_put(host);
4567 pci_disable_device(pdev);
4568
4569 hba_count--;
4570 }
4571
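/*
 * PCI shutdown hook: only the cache flush is needed here; resources
 * stay allocated because the system is going down.
 */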
4572 static void
4573 megaraid_shutdown(struct pci_dev *pdev)
4574 {
4575 struct Scsi_Host *host = pci_get_drvdata(pdev);
4576 adapter_t *adapter = (adapter_t *)host->hostdata;
4577
4578 __megaraid_shutdown(adapter);
4579 }
4580
4581 static struct pci_device_id megaraid_pci_tbl[] = {
4582 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
4583 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4584 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
4585 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4586 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
4587 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4588 {0,}
4589 };
4590 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
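/*
 * The PCI ID table above lists the AMI and Intel MegaRAID IDs handled
 * by this legacy driver; MODULE_DEVICE_TABLE() exports it so module
 * autoloading can match these devices.
 */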
4591
4592 static struct pci_driver megaraid_pci_driver = {
4593 .name = "megaraid_legacy",
4594 .id_table = megaraid_pci_tbl,
4595 .probe = megaraid_probe_one,
4596 .remove = megaraid_remove_one,
4597 .shutdown = megaraid_shutdown,
4598 };
4599
4600 static int __init megaraid_init(void)
4601 {
4602 int error;
4603
4604 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
4605 max_cmd_per_lun = MAX_CMD_PER_LUN;
4606 if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
4607 max_mbox_busy_wait = MBOX_BUSY_WAIT;
4608
4609 #ifdef CONFIG_PROC_FS
4610 mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
4611 if (!mega_proc_dir_entry) {
4612 printk(KERN_WARNING
4613 "megaraid: failed to create megaraid root\n");
4614 }
4615 #endif
4616 error = pci_register_driver(&megaraid_pci_driver);
4617 if (error) {
4618 #ifdef CONFIG_PROC_FS
4619 remove_proc_entry("megaraid", NULL);
4620 #endif
4621 return error;
4622 }
4623
4624 /*
4625 * Register the driver as a character device, for applications
4626 * to access it for ioctls.
4627 * A first argument (major) of zero to register_chrdev requests
4628 * dynamic major number allocation.
4629 */
4630 major = register_chrdev(0, "megadev_legacy", &megadev_fops);
4631 if (major < 0) {
4632 printk(KERN_WARNING
4633 "megaraid: failed to register char device\n");
4634 }
4635
4636 return 0;
4637 }
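/*
 * Editor's sketch (not part of the original driver): how a management
 * application might reach the character-device interface registered
 * above. The node name, command macro and argument below are
 * hypothetical; the real ioctl command codes and argument layout come
 * from megaraid.h and the megadev_fops ioctl handler.
 *
 *	// Look up the dynamic major assigned to "megadev_legacy" in
 *	// /proc/devices, then create a node for it, e.g.:
 *	//	mknod /dev/megadev_legacy c <major> 0
 *	int fd = open("/dev/megadev_legacy", O_RDONLY);
 *	if (fd >= 0) {
 *		ioctl(fd, MEGA_EXAMPLE_CMD, &example_arg);	// hypothetical
 *		close(fd);
 *	}
 */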
4638
4639 static void __exit megaraid_exit(void)
4640 {
4641 /*
4642 * Unregister the character device interface to the driver.
4643 */
4644 unregister_chrdev(major, "megadev_legacy");
4645
4646 pci_unregister_driver(&megaraid_pci_driver);
4647
4648 #ifdef CONFIG_PROC_FS
4649 remove_proc_entry("megaraid", NULL);
4650 #endif
4651 }
4652
4653 module_init(megaraid_init);
4654 module_exit(megaraid_exit);
4655
4656 /* vi: set ts=8 sw=8 tw=78: */