1/*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23#include <linux/config.h> /* CONFIG_PROC_FS */
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/major.h>
32#include <linux/fs.h>
33#include <linux/bio.h>
34#include <linux/blkpg.h>
35#include <linux/timer.h>
36#include <linux/proc_fs.h>
37#include <linux/init.h>
38#include <linux/hdreg.h>
39#include <linux/spinlock.h>
40#include <linux/compat.h>
41#include <linux/blktrace_api.h>
42#include <asm/uaccess.h>
43#include <asm/io.h>
44
45#include <linux/dma-mapping.h>
46#include <linux/blkdev.h>
47#include <linux/genhd.h>
48#include <linux/completion.h>
49
50#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
51#define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
52#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
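/* For example, CCISS_DRIVER_VERSION(2,6,10) evaluates to
 * (2 << 16) | (6 << 8) | 10 == 0x02060a, so driver versions compare as plain integers. */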
53
54/* Embedded module documentation macros - see modules.h */
55MODULE_AUTHOR("Hewlett-Packard Company");
56MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
57MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
58 " SA6i P600 P800 P400 P400i E200 E200i");
59MODULE_LICENSE("GPL");
60
61#include "cciss_cmd.h"
62#include "cciss.h"
63#include <linux/cciss_ioctl.h>
64
65/* define the PCI info for the cards we can control */
66static const struct pci_device_id cciss_pci_device_id[] = {
67 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
68 0x0E11, 0x4070, 0, 0, 0},
69 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
70 0x0E11, 0x4080, 0, 0, 0},
71 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
72 0x0E11, 0x4082, 0, 0, 0},
73 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
74 0x0E11, 0x4083, 0, 0, 0},
75 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
76 0x0E11, 0x409A, 0, 0, 0},
77 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
78 0x0E11, 0x409B, 0, 0, 0},
79 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
80 0x0E11, 0x409C, 0, 0, 0},
81 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
82 0x0E11, 0x409D, 0, 0, 0},
83 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
84 0x0E11, 0x4091, 0, 0, 0},
85 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
86 0x103C, 0x3225, 0, 0, 0},
87 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
88 0x103c, 0x3223, 0, 0, 0},
89 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
90 0x103c, 0x3234, 0, 0, 0},
91 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
92 0x103c, 0x3235, 0, 0, 0},
93 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
94 0x103c, 0x3211, 0, 0, 0},
95 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
96 0x103c, 0x3212, 0, 0, 0},
97 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
98 0x103c, 0x3213, 0, 0, 0},
99 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
100 0x103c, 0x3214, 0, 0, 0},
101 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
102 0x103c, 0x3215, 0, 0, 0},
103 {0,}
104};
105MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
106
107#define NR_PRODUCTS ARRAY_SIZE(products)
108
109/* board_id = Subsystem Device ID & Vendor ID
110 * product = Marketing Name for the board
111 * access = Address of the struct of function pointers
112 */
113static struct board_type products[] = {
114 { 0x40700E11, "Smart Array 5300", &SA5_access },
115 { 0x40800E11, "Smart Array 5i", &SA5B_access},
116 { 0x40820E11, "Smart Array 532", &SA5B_access},
117 { 0x40830E11, "Smart Array 5312", &SA5B_access},
118 { 0x409A0E11, "Smart Array 641", &SA5_access},
119 { 0x409B0E11, "Smart Array 642", &SA5_access},
120 { 0x409C0E11, "Smart Array 6400", &SA5_access},
121 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
122 { 0x40910E11, "Smart Array 6i", &SA5_access},
123 { 0x3225103C, "Smart Array P600", &SA5_access},
124 { 0x3223103C, "Smart Array P800", &SA5_access},
125 { 0x3234103C, "Smart Array P400", &SA5_access},
126 { 0x3235103C, "Smart Array P400i", &SA5_access},
127 { 0x3211103C, "Smart Array E200i", &SA5_access},
128 { 0x3212103C, "Smart Array E200", &SA5_access},
129 { 0x3213103C, "Smart Array E200i", &SA5_access},
130 { 0x3214103C, "Smart Array E200i", &SA5_access},
131 { 0x3215103C, "Smart Array E200i", &SA5_access},
132};
133
134/* How long to wait (in milliseconds) for board to go into simple mode */
135#define MAX_CONFIG_WAIT 30000
136#define MAX_IOCTL_CONFIG_WAIT 1000
137
138/*define how many times we will try a command because of bus resets */
139#define MAX_CMD_RETRIES 3
140
141#define READ_AHEAD 1024
142#define NR_CMDS 384 /* #commands that can be outstanding */
143#define MAX_CTLR 32
144
145/* Originally the cciss driver supported only 8 major numbers */
146#define MAX_CTLR_ORIG 8
147
148
149static ctlr_info_t *hba[MAX_CTLR];
150
151static void do_cciss_request(request_queue_t *q);
152static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
153static int cciss_open(struct inode *inode, struct file *filep);
154static int cciss_release(struct inode *inode, struct file *filep);
155static int cciss_ioctl(struct inode *inode, struct file *filep,
156 unsigned int cmd, unsigned long arg);
157static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
158
159static int revalidate_allvol(ctlr_info_t *host);
160static int cciss_revalidate(struct gendisk *disk);
161static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
162static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
163
164static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
165 int withirq, unsigned int *total_size, unsigned int *block_size);
166static void cciss_geometry_inquiry(int ctlr, int logvol,
167 int withirq, unsigned int total_size,
168 unsigned int block_size, InquiryData_struct *inq_buff,
169 drive_info_struct *drv);
170static void cciss_getgeometry(int cntl_num);
171static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
172static void start_io( ctlr_info_t *h);
173static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
174 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
175 unsigned char *scsi3addr, int cmd_type);
176static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
177 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
178 int cmd_type);
179
180static void fail_all_cmds(unsigned long ctlr);
181
182#ifdef CONFIG_PROC_FS
183static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
184 int length, int *eof, void *data);
185static void cciss_procinit(int i);
186#else
187static void cciss_procinit(int i) {}
188#endif /* CONFIG_PROC_FS */
189
190#ifdef CONFIG_COMPAT
191static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
192#endif
193
194static struct block_device_operations cciss_fops = {
195 .owner = THIS_MODULE,
196 .open = cciss_open,
197 .release = cciss_release,
198 .ioctl = cciss_ioctl,
199 .getgeo = cciss_getgeo,
200#ifdef CONFIG_COMPAT
201 .compat_ioctl = cciss_compat_ioctl,
202#endif
203 .revalidate_disk= cciss_revalidate,
204};
205
206/*
207 * Enqueuing and dequeuing functions for cmdlists.
208 */
209static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
210{
211 if (*Qptr == NULL) {
212 *Qptr = c;
213 c->next = c->prev = c;
214 } else {
215 c->prev = (*Qptr)->prev;
216 c->next = (*Qptr);
217 (*Qptr)->prev->next = c;
218 (*Qptr)->prev = c;
219 }
220}
221
222static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
223 CommandList_struct *c)
224{
225 if (c && c->next != c) {
226 if (*Qptr == c) *Qptr = c->next;
227 c->prev->next = c->next;
228 c->next->prev = c->prev;
229 } else {
230 *Qptr = NULL;
231 }
232 return c;
233}
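/* A minimal usage sketch (hypothetical caller, using names from this file):
 * each controller keeps circular doubly-linked command lists with *Qptr
 * pointing at the head, so a submission path looks roughly like
 *
 *	addQ(&h->reqQ, c);	... queue command c for the controller ...
 *	removeQ(&h->reqQ, c);	... take a specific command back off the list ...
 */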
234
235#include "cciss_scsi.c" /* For SCSI tape support */
236
237#ifdef CONFIG_PROC_FS
238
239/*
240 * Report information about this controller.
241 */
242#define ENG_GIG 1000000000
243#define ENG_GIG_FACTOR (ENG_GIG/512)
244#define RAID_UNKNOWN 6
245static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
246 "UNKNOWN"};
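/* Worked example of the size math used in cciss_proc_get_info() below:
 * a volume of 1953125000 512-byte sectors (10^12 bytes) divided by
 * ENG_GIG_FACTOR (ENG_GIG/512 == 1953125) is reported as 1000.00GB. */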
247
248static struct proc_dir_entry *proc_cciss;
249
250static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
251 int length, int *eof, void *data)
252{
253 off_t pos = 0;
254 off_t len = 0;
255 int size, i, ctlr;
256 ctlr_info_t *h = (ctlr_info_t*)data;
257 drive_info_struct *drv;
258 unsigned long flags;
259 sector_t vol_sz, vol_sz_frac;
260
261 ctlr = h->ctlr;
262
263 /* prevent displaying bogus info during configuration
264 * or deconfiguration of a logical volume
265 */
266 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
267 if (h->busy_configuring) {
268 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
269 return -EBUSY;
270 }
271 h->busy_configuring = 1;
272 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
273
274 size = sprintf(buffer, "%s: HP %s Controller\n"
275 "Board ID: 0x%08lx\n"
276 "Firmware Version: %c%c%c%c\n"
277 "IRQ: %d\n"
278 "Logical drives: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
284 h->devname,
285 h->product_name,
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
288 (unsigned int)h->intr[SIMPLE_MODE_INT],
289 h->num_luns,
290 h->Qdepth, h->commands_outstanding,
291 h->maxQsinceinit, h->max_outstanding, h->maxSG);
292
293 pos += size; len += size;
294 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
295 for(i=0; i<=h->highest_lun; i++) {
296
297 drv = &h->drv[i];
298 if (drv->heads == 0)
299 continue;
300
301 vol_sz = drv->nr_blocks;
302 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
303 vol_sz_frac *= 100;
304 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
305
306 if (drv->raid_level > 5)
307 drv->raid_level = RAID_UNKNOWN;
308 size = sprintf(buffer+len, "cciss/c%dd%d:"
309 "\t%4u.%02uGB\tRAID %s\n",
310 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
311 raid_label[drv->raid_level]);
312 pos += size; len += size;
313 }
314
315 *eof = 1;
316 *start = buffer+offset;
317 len -= offset;
318 if (len>length)
319 len = length;
320 h->busy_configuring = 0;
321 return len;
322}
323
324static int
325cciss_proc_write(struct file *file, const char __user *buffer,
326 unsigned long count, void *data)
327{
328 unsigned char cmd[80];
329 int len;
330#ifdef CONFIG_CISS_SCSI_TAPE
331 ctlr_info_t *h = (ctlr_info_t *) data;
332 int rc;
333#endif
334
335 if (count > sizeof(cmd)-1) return -EINVAL;
336 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
337 cmd[count] = '\0';
338 len = strlen(cmd); // above 3 lines ensure safety
339 if (len && cmd[len-1] == '\n')
340 cmd[--len] = '\0';
341# ifdef CONFIG_CISS_SCSI_TAPE
342 if (strcmp("engage scsi", cmd)==0) {
343 rc = cciss_engage_scsi(h->ctlr);
344 if (rc != 0) return -rc;
345 return count;
346 }
347 /* might be nice to have "disengage" too, but it's not
348 safely possible. (only 1 module use count, lock issues.) */
349# endif
350 return -EINVAL;
351}
352
353/*
354 * Get us a file in /proc/cciss that says something about each controller.
355 * Create /proc/cciss if it doesn't exist yet.
356 */
357static void __devinit cciss_procinit(int i)
358{
359 struct proc_dir_entry *pde;
360
361 if (proc_cciss == NULL) {
362 proc_cciss = proc_mkdir("cciss", proc_root_driver);
363 if (!proc_cciss)
364 return;
365 }
366
367 pde = create_proc_read_entry(hba[i]->devname,
368 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
369 proc_cciss, cciss_proc_get_info, hba[i]);
370 pde->write_proc = cciss_proc_write;
371}
372#endif /* CONFIG_PROC_FS */
373
374/*
375 * For operations that cannot sleep, a command block is allocated at init,
376 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
377 * which ones are free or in use. For operations that can wait for kmalloc
378 * to possibly sleep, this routine can be called with get_from_pool set to 0.
379 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
380 */
381static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
382{
383 CommandList_struct *c;
384 int i;
385 u64bit temp64;
386 dma_addr_t cmd_dma_handle, err_dma_handle;
387
388 if (!get_from_pool)
389 {
390 c = (CommandList_struct *) pci_alloc_consistent(
391 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
392 if(c==NULL)
393 return NULL;
394 memset(c, 0, sizeof(CommandList_struct));
395
396 c->cmdindex = -1;
397
398 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
399 h->pdev, sizeof(ErrorInfo_struct),
400 &err_dma_handle);
401
402 if (c->err_info == NULL)
403 {
404 pci_free_consistent(h->pdev,
405 sizeof(CommandList_struct), c, cmd_dma_handle);
406 return NULL;
407 }
408 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
409 } else /* get it out of the controller's pool */
410 {
411 do {
412 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
413 if (i == NR_CMDS)
414 return NULL;
415 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
416#ifdef CCISS_DEBUG
417 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
418#endif
419 c = h->cmd_pool + i;
420 memset(c, 0, sizeof(CommandList_struct));
421 cmd_dma_handle = h->cmd_pool_dhandle
422 + i*sizeof(CommandList_struct);
423 c->err_info = h->errinfo_pool + i;
424 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
425 err_dma_handle = h->errinfo_pool_dhandle
426 + i*sizeof(ErrorInfo_struct);
427 h->nr_allocs++;
428
429 c->cmdindex = i;
430 }
431
432 c->busaddr = (__u32) cmd_dma_handle;
433 temp64.val = (__u64) err_dma_handle;
434 c->ErrDesc.Addr.lower = temp64.val32.lower;
435 c->ErrDesc.Addr.upper = temp64.val32.upper;
436 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
437
438 c->ctlr = h->ctlr;
439 return c;
440
441
442}
443
444/*
445 * Frees a command block that was previously allocated with cmd_alloc().
446 */
447static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
448{
449 int i;
450 u64bit temp64;
451
452 if( !got_from_pool)
453 {
454 temp64.val32.lower = c->ErrDesc.Addr.lower;
455 temp64.val32.upper = c->ErrDesc.Addr.upper;
456 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
457 c->err_info, (dma_addr_t) temp64.val);
458 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
459 c, (dma_addr_t) c->busaddr);
460 } else
461 {
462 i = c - h->cmd_pool;
463 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
464 h->nr_frees++;
465 }
466}
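/* Usage sketch (hypothetical callers): contexts that may sleep pair a
 * DMA-consistent allocation with a matching free,
 *
 *	c = cmd_alloc(h, 0);	...	cmd_free(h, c, 0);
 *
 * while contexts that cannot sleep draw from the preallocated pool instead:
 *
 *	c = cmd_alloc(h, 1);	...	cmd_free(h, c, 1);
 */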
467
468static inline ctlr_info_t *get_host(struct gendisk *disk)
469{
470 return disk->queue->queuedata;
471}
472
473static inline drive_info_struct *get_drv(struct gendisk *disk)
474{
475 return disk->private_data;
476}
477
478/*
479 * Open. Make sure the device is really there.
480 */
481static int cciss_open(struct inode *inode, struct file *filep)
482{
483 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
484 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
485
486#ifdef CCISS_DEBUG
487 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
488#endif /* CCISS_DEBUG */
489
490 if (host->busy_initializing || drv->busy_configuring)
491 return -EBUSY;
492 /*
493 * Root is allowed to open raw volume zero even if it's not configured
494 * so array config can still work. Root is also allowed to open any
495 * volume that has a LUN ID, so it can issue IOCTL to reread the
496 * disk information. I don't think I really like this
497 * but I'm already using way too many device nodes to claim another one
498 * for "raw controller".
499 */
500 if (drv->nr_blocks == 0) {
501 if (iminor(inode) != 0) { /* not node 0? */
502 /* if not node 0 make sure it is a partition = 0 */
503 if (iminor(inode) & 0x0f) {
504 return -ENXIO;
505 /* if it is, make sure we have a LUN ID */
506 } else if (drv->LunID == 0) {
507 return -ENXIO;
508 }
509 }
510 if (!capable(CAP_SYS_ADMIN))
511 return -EPERM;
512 }
513 drv->usage_count++;
514 host->usage_count++;
515 return 0;
516}
517/*
518 * Close. Sync first.
519 */
520static int cciss_release(struct inode *inode, struct file *filep)
521{
522 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
523 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
524
525#ifdef CCISS_DEBUG
526 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
527#endif /* CCISS_DEBUG */
528
529 drv->usage_count--;
530 host->usage_count--;
531 return 0;
532}
533
534#ifdef CONFIG_COMPAT
535
536static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
537{
538 int ret;
539 lock_kernel();
540 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
541 unlock_kernel();
542 return ret;
543}
544
545static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
546static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
547
548static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
549{
550 switch (cmd) {
551 case CCISS_GETPCIINFO:
552 case CCISS_GETINTINFO:
553 case CCISS_SETINTINFO:
554 case CCISS_GETNODENAME:
555 case CCISS_SETNODENAME:
556 case CCISS_GETHEARTBEAT:
557 case CCISS_GETBUSTYPES:
558 case CCISS_GETFIRMVER:
559 case CCISS_GETDRIVVER:
560 case CCISS_REVALIDVOLS:
561 case CCISS_DEREGDISK:
562 case CCISS_REGNEWDISK:
563 case CCISS_REGNEWD:
564 case CCISS_RESCANDISK:
565 case CCISS_GETLUNINFO:
566 return do_ioctl(f, cmd, arg);
567
568 case CCISS_PASSTHRU32:
569 return cciss_ioctl32_passthru(f, cmd, arg);
570 case CCISS_BIG_PASSTHRU32:
571 return cciss_ioctl32_big_passthru(f, cmd, arg);
572
573 default:
574 return -ENOIOCTLCMD;
575 }
576}
577
578static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
579{
580 IOCTL32_Command_struct __user *arg32 =
581 (IOCTL32_Command_struct __user *) arg;
582 IOCTL_Command_struct arg64;
583 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
584 int err;
585 u32 cp;
586
587 err = 0;
588 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
589 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
590 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
591 err |= get_user(arg64.buf_size, &arg32->buf_size);
592 err |= get_user(cp, &arg32->buf);
593 arg64.buf = compat_ptr(cp);
594 err |= copy_to_user(p, &arg64, sizeof(arg64));
595
596 if (err)
597 return -EFAULT;
598
599 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
600 if (err)
601 return err;
602 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
603 if (err)
604 return -EFAULT;
605 return err;
606}
607
608static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
609{
610 BIG_IOCTL32_Command_struct __user *arg32 =
611 (BIG_IOCTL32_Command_struct __user *) arg;
612 BIG_IOCTL_Command_struct arg64;
613 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
614 int err;
615 u32 cp;
616
617 err = 0;
618 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
619 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
620 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
621 err |= get_user(arg64.buf_size, &arg32->buf_size);
622 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
623 err |= get_user(cp, &arg32->buf);
624 arg64.buf = compat_ptr(cp);
625 err |= copy_to_user(p, &arg64, sizeof(arg64));
626
627 if (err)
628 return -EFAULT;
629
630 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
631 if (err)
632 return err;
633 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
634 if (err)
635 return -EFAULT;
636 return err;
637}
638#endif
639
640static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
641{
642 drive_info_struct *drv = get_drv(bdev->bd_disk);
643
644 if (!drv->cylinders)
645 return -ENXIO;
646
647 geo->heads = drv->heads;
648 geo->sectors = drv->sectors;
649 geo->cylinders = drv->cylinders;
650 return 0;
651}
652
653/*
654 * ioctl
655 */
656static int cciss_ioctl(struct inode *inode, struct file *filep,
657 unsigned int cmd, unsigned long arg)
658{
659 struct block_device *bdev = inode->i_bdev;
660 struct gendisk *disk = bdev->bd_disk;
661 ctlr_info_t *host = get_host(disk);
662 drive_info_struct *drv = get_drv(disk);
663 int ctlr = host->ctlr;
664 void __user *argp = (void __user *)arg;
665
666#ifdef CCISS_DEBUG
667 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
668#endif /* CCISS_DEBUG */
669
670 switch(cmd) {
671 case CCISS_GETPCIINFO:
672 {
673 cciss_pci_info_struct pciinfo;
674
675 if (!arg) return -EINVAL;
676 pciinfo.domain = pci_domain_nr(host->pdev->bus);
677 pciinfo.bus = host->pdev->bus->number;
678 pciinfo.dev_fn = host->pdev->devfn;
679 pciinfo.board_id = host->board_id;
680 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
681 return -EFAULT;
682 return(0);
683 }
684 case CCISS_GETINTINFO:
685 {
686 cciss_coalint_struct intinfo;
687 if (!arg) return -EINVAL;
688 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
689 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
690 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
691 return -EFAULT;
692 return(0);
693 }
694 case CCISS_SETINTINFO:
695 {
696 cciss_coalint_struct intinfo;
697 unsigned long flags;
698 int i;
699
700 if (!arg) return -EINVAL;
701 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
702 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
703 return -EFAULT;
704 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
705
706 {
707// printk("cciss_ioctl: delay and count cannot be 0\n");
708 return( -EINVAL);
709 }
710 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
711 /* Update the field, and then ring the doorbell */
712 writel( intinfo.delay,
713 &(host->cfgtable->HostWrite.CoalIntDelay));
714 writel( intinfo.count,
715 &(host->cfgtable->HostWrite.CoalIntCount));
716 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
717
718 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
719 if (!(readl(host->vaddr + SA5_DOORBELL)
720 & CFGTBL_ChangeReq))
721 break;
722 /* delay and try again */
723 udelay(1000);
724 }
725 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
726 if (i >= MAX_IOCTL_CONFIG_WAIT)
727 return -EAGAIN;
728 return(0);
729 }
730 case CCISS_GETNODENAME:
731 {
732 NodeName_type NodeName;
733 int i;
734
735 if (!arg) return -EINVAL;
736 for(i=0;i<16;i++)
737 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
738 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
739 return -EFAULT;
740 return(0);
741 }
742 case CCISS_SETNODENAME:
743 {
744 NodeName_type NodeName;
745 unsigned long flags;
746 int i;
747
748 if (!arg) return -EINVAL;
749 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
750
751 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
752 return -EFAULT;
753
754 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
755
756 /* Update the field, and then ring the doorbell */
757 for(i=0;i<16;i++)
758 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
759
760 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
761
762 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
763 if (!(readl(host->vaddr + SA5_DOORBELL)
764 & CFGTBL_ChangeReq))
765 break;
766 /* delay and try again */
767 udelay(1000);
768 }
769 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
770 if (i >= MAX_IOCTL_CONFIG_WAIT)
771 return -EAGAIN;
772 return(0);
773 }
774
775 case CCISS_GETHEARTBEAT:
776 {
777 Heartbeat_type heartbeat;
778
779 if (!arg) return -EINVAL;
780 heartbeat = readl(&host->cfgtable->HeartBeat);
781 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
782 return -EFAULT;
783 return(0);
784 }
785 case CCISS_GETBUSTYPES:
786 {
787 BusTypes_type BusTypes;
788
789 if (!arg) return -EINVAL;
790 BusTypes = readl(&host->cfgtable->BusTypes);
791 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
792 return -EFAULT;
793 return(0);
794 }
795 case CCISS_GETFIRMVER:
796 {
797 FirmwareVer_type firmware;
798
799 if (!arg) return -EINVAL;
800 memcpy(firmware, host->firm_ver, 4);
801
802 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
803 return -EFAULT;
804 return(0);
805 }
806 case CCISS_GETDRIVVER:
807 {
808 DriverVer_type DriverVer = DRIVER_VERSION;
809
810 if (!arg) return -EINVAL;
811
812 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
813 return -EFAULT;
814 return(0);
815 }
816
817 case CCISS_REVALIDVOLS:
818 if (bdev != bdev->bd_contains || drv != host->drv)
819 return -ENXIO;
820 return revalidate_allvol(host);
821
822 case CCISS_GETLUNINFO: {
823 LogvolInfo_struct luninfo;
824
825 luninfo.LunID = drv->LunID;
826 luninfo.num_opens = drv->usage_count;
827 luninfo.num_parts = 0;
828 if (copy_to_user(argp, &luninfo,
829 sizeof(LogvolInfo_struct)))
830 return -EFAULT;
831 return(0);
832 }
833 case CCISS_DEREGDISK:
834 return rebuild_lun_table(host, disk);
835
836 case CCISS_REGNEWD:
837 return rebuild_lun_table(host, NULL);
838
839 case CCISS_PASSTHRU:
840 {
841 IOCTL_Command_struct iocommand;
842 CommandList_struct *c;
843 char *buff = NULL;
844 u64bit temp64;
845 unsigned long flags;
846 DECLARE_COMPLETION(wait);
847
848 if (!arg) return -EINVAL;
849
850 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
851
852 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
853 return -EFAULT;
854 if((iocommand.buf_size < 1) &&
855 (iocommand.Request.Type.Direction != XFER_NONE))
856 {
857 return -EINVAL;
858 }
859#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
860 /* Check kmalloc limits */
861 if(iocommand.buf_size > 128000)
862 return -EINVAL;
863#endif
864 if(iocommand.buf_size > 0)
865 {
866 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
867 if( buff == NULL)
868 return -EFAULT;
869 }
870 if (iocommand.Request.Type.Direction == XFER_WRITE)
871 {
872 /* Copy the data into the buffer we created */
873 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
874 {
875 kfree(buff);
876 return -EFAULT;
877 }
878 } else {
879 memset(buff, 0, iocommand.buf_size);
880 }
881 if ((c = cmd_alloc(host , 0)) == NULL)
882 {
883 kfree(buff);
884 return -ENOMEM;
885 }
886 // Fill in the command type
887 c->cmd_type = CMD_IOCTL_PEND;
888 // Fill in Command Header
889 c->Header.ReplyQueue = 0; // unused in simple mode
890 if( iocommand.buf_size > 0) // buffer to fill
891 {
892 c->Header.SGList = 1;
893 c->Header.SGTotal= 1;
894 } else // no buffers to fill
895 {
896 c->Header.SGList = 0;
897 c->Header.SGTotal= 0;
898 }
899 c->Header.LUN = iocommand.LUN_info;
900 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for tag
901
902 // Fill in Request block
903 c->Request = iocommand.Request;
904
905 // Fill in the scatter gather information
906 if (iocommand.buf_size > 0 )
907 {
908 temp64.val = pci_map_single( host->pdev, buff,
909 iocommand.buf_size,
910 PCI_DMA_BIDIRECTIONAL);
911 c->SG[0].Addr.lower = temp64.val32.lower;
912 c->SG[0].Addr.upper = temp64.val32.upper;
913 c->SG[0].Len = iocommand.buf_size;
914 c->SG[0].Ext = 0; // we are not chaining
915 }
916 c->waiting = &wait;
917
918 /* Put the request on the tail of the request queue */
919 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
920 addQ(&host->reqQ, c);
921 host->Qdepth++;
922 start_io(host);
923 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
924
925 wait_for_completion(&wait);
926
927 /* unlock the buffers from DMA */
928 temp64.val32.lower = c->SG[0].Addr.lower;
929 temp64.val32.upper = c->SG[0].Addr.upper;
930 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
931 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
932
933 /* Copy the error information out */
934 iocommand.error_info = *(c->err_info);
935 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
936 {
937 kfree(buff);
938 cmd_free(host, c, 0);
939 return( -EFAULT);
940 }
941
942 if (iocommand.Request.Type.Direction == XFER_READ)
943 {
944 /* Copy the data out of the buffer we created */
945 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
946 {
947 kfree(buff);
948 cmd_free(host, c, 0);
949 return -EFAULT;
950 }
951 }
952 kfree(buff);
953 cmd_free(host, c, 0);
954 return(0);
955 }
956 case CCISS_BIG_PASSTHRU: {
957 BIG_IOCTL_Command_struct *ioc;
958 CommandList_struct *c;
959 unsigned char **buff = NULL;
960 int *buff_size = NULL;
961 u64bit temp64;
962 unsigned long flags;
963 BYTE sg_used = 0;
964 int status = 0;
965 int i;
966 DECLARE_COMPLETION(wait);
967 __u32 left;
968 __u32 sz;
969 BYTE __user *data_ptr;
970
971 if (!arg)
972 return -EINVAL;
973 if (!capable(CAP_SYS_RAWIO))
974 return -EPERM;
975 ioc = (BIG_IOCTL_Command_struct *)
976 kmalloc(sizeof(*ioc), GFP_KERNEL);
977 if (!ioc) {
978 status = -ENOMEM;
979 goto cleanup1;
980 }
981 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
982 status = -EFAULT;
983 goto cleanup1;
984 }
985 if ((ioc->buf_size < 1) &&
986 (ioc->Request.Type.Direction != XFER_NONE)) {
987 status = -EINVAL;
988 goto cleanup1;
989 }
990 /* Check kmalloc limits using all SGs */
991 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
992 status = -EINVAL;
993 goto cleanup1;
994 }
995 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
996 status = -EINVAL;
997 goto cleanup1;
998 }
999 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1000 if (!buff) {
1001 status = -ENOMEM;
1002 goto cleanup1;
1003 }
1004 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1005 GFP_KERNEL);
1006 if (!buff_size) {
1007 status = -ENOMEM;
1008 goto cleanup1;
1009 }
1010 left = ioc->buf_size;
1011 data_ptr = ioc->buf;
1012 while (left) {
1013 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1014 buff_size[sg_used] = sz;
1015 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1016 if (buff[sg_used] == NULL) {
1017 status = -ENOMEM;
1018 goto cleanup1;
1019 }
1020 if (ioc->Request.Type.Direction == XFER_WRITE) {
1021 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1022 status = -ENOMEM;
1023 goto cleanup1;
1024 }
1025 } else {
1026 memset(buff[sg_used], 0, sz);
1027 }
1028 left -= sz;
1029 data_ptr += sz;
1030 sg_used++;
1031 }
1032 if ((c = cmd_alloc(host , 0)) == NULL) {
1033 status = -ENOMEM;
1034 goto cleanup1;
1035 }
1036 c->cmd_type = CMD_IOCTL_PEND;
1037 c->Header.ReplyQueue = 0;
1038
1039 if( ioc->buf_size > 0) {
1040 c->Header.SGList = sg_used;
1041 c->Header.SGTotal= sg_used;
1042 } else {
1043 c->Header.SGList = 0;
1044 c->Header.SGTotal= 0;
1045 }
1046 c->Header.LUN = ioc->LUN_info;
1047 c->Header.Tag.lower = c->busaddr;
1048
1049 c->Request = ioc->Request;
1050 if (ioc->buf_size > 0 ) {
1051 int i;
1052 for(i=0; i<sg_used; i++) {
1053 temp64.val = pci_map_single( host->pdev, buff[i],
1054 buff_size[i],
1055 PCI_DMA_BIDIRECTIONAL);
1056 c->SG[i].Addr.lower = temp64.val32.lower;
1057 c->SG[i].Addr.upper = temp64.val32.upper;
1058 c->SG[i].Len = buff_size[i];
1059 c->SG[i].Ext = 0; /* we are not chaining */
1060 }
1061 }
1062 c->waiting = &wait;
1063 /* Put the request on the tail of the request queue */
1064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1065 addQ(&host->reqQ, c);
1066 host->Qdepth++;
1067 start_io(host);
1068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1069 wait_for_completion(&wait);
1070 /* unlock the buffers from DMA */
1071 for(i=0; i<sg_used; i++) {
1072 temp64.val32.lower = c->SG[i].Addr.lower;
1073 temp64.val32.upper = c->SG[i].Addr.upper;
1074 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1075 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1076 }
1077 /* Copy the error information out */
1078 ioc->error_info = *(c->err_info);
1079 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1080 cmd_free(host, c, 0);
1081 status = -EFAULT;
1082 goto cleanup1;
1083 }
1084 if (ioc->Request.Type.Direction == XFER_READ) {
1085 /* Copy the data out of the buffer we created */
1086 BYTE __user *ptr = ioc->buf;
1087 for(i=0; i< sg_used; i++) {
1088 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1089 cmd_free(host, c, 0);
1090 status = -EFAULT;
1091 goto cleanup1;
1092 }
1093 ptr += buff_size[i];
1094 }
1095 }
1096 cmd_free(host, c, 0);
1097 status = 0;
1098cleanup1:
1099 if (buff) {
1100 for(i=0; i<sg_used; i++)
1101 kfree(buff[i]);
1102 kfree(buff);
1103 }
1104 kfree(buff_size);
1105 kfree(ioc);
1106 return(status);
1107 }
1108 default:
1109 return -ENOTTY;
1110 }
1111
1112}
1113
1114/*
1115 * revalidate_allvol is for online array config utilities. After a
1116 * utility reconfigures the drives in the array, it can use this function
1117 * (through an ioctl) to make the driver zap any previous disk structs for
1118 * that controller and get new ones.
1119 *
1120 * Right now I'm using the getgeometry() function to do this, but this
1121 * function should probably be finer grained and allow you to revalidate one
1122 * particular logical volume (instead of all of them on a particular
1123 * controller).
1124 */
1125static int revalidate_allvol(ctlr_info_t *host)
1126{
1127 int ctlr = host->ctlr, i;
1128 unsigned long flags;
1129
1130 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1131 if (host->usage_count > 1) {
1132 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1133 printk(KERN_WARNING "cciss: Device busy for volume"
1134 " revalidation (usage=%d)\n", host->usage_count);
1135 return -EBUSY;
1136 }
1137 host->usage_count++;
1138 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1139
1140 for(i=0; i< NWD; i++) {
1141 struct gendisk *disk = host->gendisk[i];
1142 if (disk) {
1143 request_queue_t *q = disk->queue;
1144
1145 if (disk->flags & GENHD_FL_UP)
1146 del_gendisk(disk);
1147 if (q)
1148 blk_cleanup_queue(q);
1149 }
1150 }
1151
1152 /*
1153 * Set the partition and block size structures for all volumes
1154 * on this controller to zero. We will reread all of this data
1155 */
1156 memset(host->drv, 0, sizeof(drive_info_struct)
1157 * CISS_MAX_LUN);
1158 /*
1159 * Tell the array controller not to give us any interrupts while
1160 * we check the new geometry. Then turn interrupts back on when
1161 * we're done.
1162 */
1163 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1164 cciss_getgeometry(ctlr);
1165 host->access.set_intr_mask(host, CCISS_INTR_ON);
1166
1167 /* Loop through each real device */
1168 for (i = 0; i < NWD; i++) {
1169 struct gendisk *disk = host->gendisk[i];
1170 drive_info_struct *drv = &(host->drv[i]);
1171 /* we must register the controller even if no disks exist */
1172 /* this is for the online array utilities */
1173 if (!drv->heads && i)
1174 continue;
1175 blk_queue_hardsect_size(drv->queue, drv->block_size);
1176 set_capacity(disk, drv->nr_blocks);
1177 add_disk(disk);
1178 }
1179 host->usage_count--;
1180 return 0;
1181}
1182
1183static inline void complete_buffers(struct bio *bio, int status)
1184{
1185 while (bio) {
1186 struct bio *xbh = bio->bi_next;
1187 int nr_sectors = bio_sectors(bio);
1188
1189 bio->bi_next = NULL;
1190 blk_finished_io(len);
1191 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1192 bio = xbh;
1193 }
1194
1195}
1196
1197static void cciss_softirq_done(struct request *rq)
1198{
1199 CommandList_struct *cmd = rq->completion_data;
1200 ctlr_info_t *h = hba[cmd->ctlr];
1201 unsigned long flags;
1202 u64bit temp64;
1203 int i, ddir;
1204
1205 if (cmd->Request.Type.Direction == XFER_READ)
1206 ddir = PCI_DMA_FROMDEVICE;
1207 else
1208 ddir = PCI_DMA_TODEVICE;
1209
1210 /* command did not need to be retried */
1211 /* unmap the DMA mapping for all the scatter gather elements */
1212 for(i=0; i<cmd->Header.SGList; i++) {
1213 temp64.val32.lower = cmd->SG[i].Addr.lower;
1214 temp64.val32.upper = cmd->SG[i].Addr.upper;
1215 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1216 }
1217
1218 complete_buffers(rq->bio, rq->errors);
1219
1220#ifdef CCISS_DEBUG
1221 printk("Done with %p\n", rq);
1222#endif /* CCISS_DEBUG */
1223
1224 add_disk_randomness(rq->rq_disk);
1225 spin_lock_irqsave(&h->lock, flags);
1226 end_that_request_last(rq, rq->errors);
1227 cmd_free(h, cmd,1);
1228 spin_unlock_irqrestore(&h->lock, flags);
1229}
1230
1231/* This function will check the usage_count of the drive to be updated/added.
1232 * If the usage_count is zero then the drive information will be updated and
1233 * the disk will be re-registered with the kernel. If not then it will be
1234 * left alone for the next reboot. The exception to this is disk 0 which
1235 * will always be left registered with the kernel since it is also the
1236 * controller node. Any changes to disk 0 will show up on the next
1237 * reboot.
1238*/
1239static void cciss_update_drive_info(int ctlr, int drv_index)
1240 {
1241 ctlr_info_t *h = hba[ctlr];
1242 struct gendisk *disk;
1243 ReadCapdata_struct *size_buff = NULL;
1244 InquiryData_struct *inq_buff = NULL;
1245 unsigned int block_size;
1246 unsigned int total_size;
1247 unsigned long flags = 0;
1248 int ret = 0;
1249
1250 /* if the disk already exists then deregister it before proceeding*/
1251 if (h->drv[drv_index].raid_level != -1){
1252 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1253 h->drv[drv_index].busy_configuring = 1;
1254 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1255 ret = deregister_disk(h->gendisk[drv_index],
1256 &h->drv[drv_index], 0);
1257 h->drv[drv_index].busy_configuring = 0;
1258 }
1259
1260 /* If the disk is in use return */
1261 if (ret)
1262 return;
1263
1264
1265 /* Get information about the disk and modify the driver structure */
1266 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1267 if (size_buff == NULL)
1268 goto mem_msg;
1269 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1270 if (inq_buff == NULL)
1271 goto mem_msg;
1272
1273 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1274 &total_size, &block_size);
1275 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1276 inq_buff, &h->drv[drv_index]);
1277
1278 ++h->num_luns;
1279 disk = h->gendisk[drv_index];
1280 set_capacity(disk, h->drv[drv_index].nr_blocks);
1281
1282
1283 /* if it's the controller it's already added */
1284 if (drv_index){
1285 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1286
1287 /* Set up queue information */
1288 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1289 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1290
1291 /* This is a hardware imposed limit. */
1292 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1293
1294 /* This is a limit in the driver and could be eliminated. */
1295 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1296
1297 blk_queue_max_sectors(disk->queue, 512);
1298
1299 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1300
1301 disk->queue->queuedata = hba[ctlr];
1302
1303 blk_queue_hardsect_size(disk->queue,
1304 hba[ctlr]->drv[drv_index].block_size);
1305
1306 h->drv[drv_index].queue = disk->queue;
1307 add_disk(disk);
1308 }
1309
1310freeret:
1311 kfree(size_buff);
1312 kfree(inq_buff);
1313 return;
1314mem_msg:
1315 printk(KERN_ERR "cciss: out of memory\n");
1316 goto freeret;
1317}
1318
1319/* This function will find the first index of the controllers drive array
1320 * that has a -1 for the raid_level and will return that index. This is
1321 * where new drives will be added. If the index to be returned is greater
1322 * than the highest_lun index for the controller then highest_lun is set
1323 * to this new index. If there are no available indexes then -1 is returned.
1324*/
1325static int cciss_find_free_drive_index(int ctlr)
1326{
1327 int i;
1328
1329 for (i=0; i < CISS_MAX_LUN; i++){
1330 if (hba[ctlr]->drv[i].raid_level == -1){
1331 if (i > hba[ctlr]->highest_lun)
1332 hba[ctlr]->highest_lun = i;
1333 return i;
1334 }
1335 }
1336 return -1;
1337}
1338
1339/* This function will add and remove logical drives from the Logical
1340 * drive array of the controller and maintain persistence of ordering
1341 * so that mount points are preserved until the next reboot. This allows
1342 * for the removal of logical drives in the middle of the drive array
1343 * without a re-ordering of those drives.
1344 * INPUT
1345 * h = The controller to perform the operations on
1346 * del_disk = The disk to remove if specified. If the value given
1347 * is NULL then no disk is removed.
1348*/
1349static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1350{
1351 int ctlr = h->ctlr;
1352 int num_luns;
1353 ReportLunData_struct *ld_buff = NULL;
1354 drive_info_struct *drv = NULL;
1355 int return_code;
1356 int listlength = 0;
1357 int i;
1358 int drv_found;
1359 int drv_index = 0;
1360 __u32 lunid = 0;
1361 unsigned long flags;
1362
1363 /* Set busy_configuring flag for this operation */
1364 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1365 if (h->num_luns >= CISS_MAX_LUN){
1366 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1367 return -EINVAL;
1368 }
1369
1370 if (h->busy_configuring){
1371 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1372 return -EBUSY;
1373 }
1374 h->busy_configuring = 1;
1375
1376 /* if del_disk is NULL then we are being called to add a new disk
1377 * and update the logical drive table. If it is not NULL then
1378 * we will check if the disk is in use or not.
1379 */
1380 if (del_disk != NULL){
1381 drv = get_drv(del_disk);
1382 drv->busy_configuring = 1;
1383 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1384 return_code = deregister_disk(del_disk, drv, 1);
1385 drv->busy_configuring = 0;
1386 h->busy_configuring = 0;
1387 return return_code;
1388 } else {
1389 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1390 if (!capable(CAP_SYS_RAWIO))
1391 return -EPERM;
1392
1393 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1394 if (ld_buff == NULL)
1395 goto mem_msg;
1396
1397 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1398 sizeof(ReportLunData_struct), 0, 0, 0,
1399 TYPE_CMD);
1400
1401 if (return_code == IO_OK){
1402 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1403 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1404 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1405 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1406 } else{ /* reading number of logical volumes failed */
1407 printk(KERN_WARNING "cciss: report logical volume"
1408 " command failed\n");
1409 listlength = 0;
1410 goto freeret;
1411 }
1412
1413 num_luns = listlength / 8; /* 8 bytes per entry */
1414 if (num_luns > CISS_MAX_LUN){
1415 num_luns = CISS_MAX_LUN;
1416 printk(KERN_WARNING "cciss: more luns configured"
1417 " on controller than can be handled by"
1418 " this driver.\n");
1419 }
1420
1421 /* Compare controller drive array to drivers drive array.
1422 * Check for updates in the drive information and any new drives
1423 * on the controller.
1424 */
1425 for (i=0; i < num_luns; i++){
1426 int j;
1427
1428 drv_found = 0;
1429
1430 lunid = (0xff &
1431 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1432 lunid |= (0xff &
1433 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1434 lunid |= (0xff &
1435 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1436 lunid |= 0xff &
1437 (unsigned int)(ld_buff->LUN[i][0]);
1438
1439 /* Find if the LUN is already in the drive array
1440 * of the controller. If so then update its info
1441 * if it is not in use. If it does not exist then find
1442 * the first free index and add it.
1443 */
1444 for (j=0; j <= h->highest_lun; j++){
1445 if (h->drv[j].LunID == lunid){
1446 drv_index = j;
1447 drv_found = 1;
1448 }
1449 }
1450
1451 /* check if the drive was found already in the array */
1452 if (!drv_found){
1453 drv_index = cciss_find_free_drive_index(ctlr);
1454 if (drv_index == -1)
1455 goto freeret;
1456
1457 }
1458 h->drv[drv_index].LunID = lunid;
1459 cciss_update_drive_info(ctlr, drv_index);
1460 } /* end for */
1461 } /* end else */
1462
1463freeret:
1464 kfree(ld_buff);
1465 h->busy_configuring = 0;
1466 /* We return -1 here to tell the ACU that we have registered/updated
1467 * all of the drives that we can and to keep it from calling us
1468 * additional times.
1469 */
1470 return -1;
1471mem_msg:
1472 printk(KERN_ERR "cciss: out of memory\n");
1473 goto freeret;
1474}
1475
1476/* This function will deregister the disk and its queue from the
1477 * kernel. It must be called with the controller lock held and the
1478 * drv structure's busy_configuring flag set. Its parameters are:
1479 *
1480 * disk = This is the disk to be deregistered
1481 * drv = This is the drive_info_struct associated with the disk to be
1482 * deregistered. It contains information about the disk used
1483 * by the driver.
1484 * clear_all = This flag determines whether or not the disk information
1485 * is going to be completely cleared out and the highest_lun
1486 * reset. Sometimes we want to clear out information about
1487 * the disk in preparation for re-adding it. In this case
1488 * the highest_lun should be left unchanged and the LunID
1489 * should not be cleared.
1490*/
1491static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1492 int clear_all)
1493{
1494 ctlr_info_t *h = get_host(disk);
1495
1496 if (!capable(CAP_SYS_RAWIO))
1497 return -EPERM;
1498
1499 /* make sure logical volume is NOT in use */
1500 if(clear_all || (h->gendisk[0] == disk)) {
1501 if (drv->usage_count > 1)
1502 return -EBUSY;
1503 }
1504 else
1505 if( drv->usage_count > 0 )
1506 return -EBUSY;
1507
1508 /* invalidate the devices and deregister the disk. If it is disk
1509 * zero do not deregister it but just zero out its values. This
1510 * allows us to delete disk zero but keep the controller registered.
1511 */
1512 if (h->gendisk[0] != disk){
1513 if (disk) {
1514 request_queue_t *q = disk->queue;
1515 if (disk->flags & GENHD_FL_UP)
1516 del_gendisk(disk);
1517 if (q) {
1518 blk_cleanup_queue(q);
1519 drv->queue = NULL;
1520 }
1521 }
1522 }
1523
1524 --h->num_luns;
1525 /* zero out the disk size info */
1526 drv->nr_blocks = 0;
1527 drv->block_size = 0;
1528 drv->heads = 0;
1529 drv->sectors = 0;
1530 drv->cylinders = 0;
1531 drv->raid_level = -1; /* This can be used as a flag variable to
1532 * indicate that this element of the drive
1533 * array is free.
1534 */
1535
1536 if (clear_all){
1537 /* check to see if it was the last disk */
1538 if (drv == h->drv + h->highest_lun) {
1539 /* if so, find the new highest lun */
1540 int i, newhighest =-1;
1541 for(i=0; i<h->highest_lun; i++) {
1542 /* if the disk has size > 0, it is available */
1543 if (h->drv[i].heads)
1544 newhighest = i;
1545 }
1546 h->highest_lun = newhighest;
1547 }
1548
1549 drv->LunID = 0;
1550 }
1551 return(0);
1552}
1553
1554static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1555 size_t size,
1556 unsigned int use_unit_num, /* 0: address the controller,
1557 1: address logical volume log_unit,
1558 2: periph device address is scsi3addr */
1559 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1560 int cmd_type)
1561{
1562 ctlr_info_t *h= hba[ctlr];
1563 u64bit buff_dma_handle;
1564 int status = IO_OK;
1565
1566 c->cmd_type = CMD_IOCTL_PEND;
1567 c->Header.ReplyQueue = 0;
1568 if( buff != NULL) {
1569 c->Header.SGList = 1;
1570 c->Header.SGTotal= 1;
1571 } else {
1572 c->Header.SGList = 0;
1573 c->Header.SGTotal= 0;
1574 }
1575 c->Header.Tag.lower = c->busaddr;
1576
1577 c->Request.Type.Type = cmd_type;
1578 if (cmd_type == TYPE_CMD) {
1579 switch(cmd) {
1580 case CISS_INQUIRY:
1581 /* If the logical unit number is 0 then this is going
1582 to the controller, so it's a physical command:
1583 mode = 0, target = 0, and we have nothing to write.
1584 Otherwise, if use_unit_num == 1,
1585 mode = 1 (volume set addressing), target = LUNID;
1586 otherwise, if use_unit_num == 2,
1587 mode = 0 (periph dev addr), target = scsi3addr. */
1588 if (use_unit_num == 1) {
1589 c->Header.LUN.LogDev.VolId=
1590 h->drv[log_unit].LunID;
1591 c->Header.LUN.LogDev.Mode = 1;
1592 } else if (use_unit_num == 2) {
1593 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1594 c->Header.LUN.LogDev.Mode = 0;
1595 }
1596 /* are we trying to read a vital product page */
1597 if(page_code != 0) {
1598 c->Request.CDB[1] = 0x01;
1599 c->Request.CDB[2] = page_code;
1600 }
1601 c->Request.CDBLen = 6;
1602 c->Request.Type.Attribute = ATTR_SIMPLE;
1603 c->Request.Type.Direction = XFER_READ;
1604 c->Request.Timeout = 0;
1605 c->Request.CDB[0] = CISS_INQUIRY;
1606 c->Request.CDB[4] = size & 0xFF;
1607 break;
1608 case CISS_REPORT_LOG:
1609 case CISS_REPORT_PHYS:
1610 /* Talking to the controller, so it's a physical command:
1611 mode = 00, target = 0. Nothing to write.
1612 */
1613 c->Request.CDBLen = 12;
1614 c->Request.Type.Attribute = ATTR_SIMPLE;
1615 c->Request.Type.Direction = XFER_READ;
1616 c->Request.Timeout = 0;
1617 c->Request.CDB[0] = cmd;
1618 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1619 c->Request.CDB[7] = (size >> 16) & 0xFF;
1620 c->Request.CDB[8] = (size >> 8) & 0xFF;
1621 c->Request.CDB[9] = size & 0xFF;
1622 break;
1623
1624 case CCISS_READ_CAPACITY:
1625 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1626 c->Header.LUN.LogDev.Mode = 1;
1627 c->Request.CDBLen = 10;
1628 c->Request.Type.Attribute = ATTR_SIMPLE;
1629 c->Request.Type.Direction = XFER_READ;
1630 c->Request.Timeout = 0;
1631 c->Request.CDB[0] = cmd;
1632 break;
1633 case CCISS_CACHE_FLUSH:
1634 c->Request.CDBLen = 12;
1635 c->Request.Type.Attribute = ATTR_SIMPLE;
1636 c->Request.Type.Direction = XFER_WRITE;
1637 c->Request.Timeout = 0;
1638 c->Request.CDB[0] = BMIC_WRITE;
1639 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1640 break;
1641 default:
1642 printk(KERN_WARNING
1643 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1644 return(IO_ERROR);
1645 }
1646 } else if (cmd_type == TYPE_MSG) {
1647 switch (cmd) {
1648 case 0: /* ABORT message */
1649 c->Request.CDBLen = 12;
1650 c->Request.Type.Attribute = ATTR_SIMPLE;
1651 c->Request.Type.Direction = XFER_WRITE;
1652 c->Request.Timeout = 0;
1653 c->Request.CDB[0] = cmd; /* abort */
1654 c->Request.CDB[1] = 0; /* abort a command */
1655 /* buff contains the tag of the command to abort */
1656 memcpy(&c->Request.CDB[4], buff, 8);
1657 break;
1658 case 1: /* RESET message */
1659 c->Request.CDBLen = 12;
1660 c->Request.Type.Attribute = ATTR_SIMPLE;
1661 c->Request.Type.Direction = XFER_WRITE;
1662 c->Request.Timeout = 0;
1663 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1664 c->Request.CDB[0] = cmd; /* reset */
1665 c->Request.CDB[1] = 0x04; /* reset a LUN */
			break;
1666 case 3: /* No-Op message */
1667 c->Request.CDBLen = 1;
1668 c->Request.Type.Attribute = ATTR_SIMPLE;
1669 c->Request.Type.Direction = XFER_WRITE;
1670 c->Request.Timeout = 0;
1671 c->Request.CDB[0] = cmd;
1672 break;
1673 default:
1674 printk(KERN_WARNING
1675 "cciss%d: unknown message type %d\n",
1676 ctlr, cmd);
1677 return IO_ERROR;
1678 }
1679 } else {
1680 printk(KERN_WARNING
1681 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1682 return IO_ERROR;
1683 }
1684 /* Fill in the scatter gather information */
1685 if (size > 0) {
1686 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1687 buff, size, PCI_DMA_BIDIRECTIONAL);
1688 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1689 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1690 c->SG[0].Len = size;
1691 c->SG[0].Ext = 0; /* we are not chaining */
1692 }
1693 return status;
1694}
1695static int sendcmd_withirq(__u8 cmd,
1696 int ctlr,
1697 void *buff,
1698 size_t size,
1699 unsigned int use_unit_num,
1700 unsigned int log_unit,
1701 __u8 page_code,
1702 int cmd_type)
1703{
1704 ctlr_info_t *h = hba[ctlr];
1705 CommandList_struct *c;
1706 u64bit buff_dma_handle;
1707 unsigned long flags;
1708 int return_status;
1709 DECLARE_COMPLETION(wait);
1710
1711 if ((c = cmd_alloc(h , 0)) == NULL)
1712 return -ENOMEM;
1713 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1714 log_unit, page_code, NULL, cmd_type);
1715 if (return_status != IO_OK) {
1716 cmd_free(h, c, 0);
1717 return return_status;
1718 }
1719resend_cmd2:
1720 c->waiting = &wait;
1721
1722 /* Put the request on the tail of the queue and send it */
1723 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1724 addQ(&h->reqQ, c);
1725 h->Qdepth++;
1726 start_io(h);
1727 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1728
1729 wait_for_completion(&wait);
1730
1731 if(c->err_info->CommandStatus != 0)
1732 { /* an error has occurred */
1733 switch(c->err_info->CommandStatus)
1734 {
1735 case CMD_TARGET_STATUS:
1736 printk(KERN_WARNING "cciss: cmd %p has "
1737 " completed with errors\n", c);
1738 if( c->err_info->ScsiStatus)
1739 {
1740 printk(KERN_WARNING "cciss: cmd %p "
1741 "has SCSI Status = %x\n",
1742 c,
1743 c->err_info->ScsiStatus);
1744 }
1745
1746 break;
1747 case CMD_DATA_UNDERRUN:
1748 case CMD_DATA_OVERRUN:
1749 /* expected for inquire and report lun commands */
1750 break;
1751 case CMD_INVALID:
1752 printk(KERN_WARNING "cciss: Cmd %p is "
1753 "reported invalid\n", c);
1754 return_status = IO_ERROR;
1755 break;
1756 case CMD_PROTOCOL_ERR:
1757 printk(KERN_WARNING "cciss: cmd %p has "
1758 "protocol error \n", c);
1759 return_status = IO_ERROR;
1760 break;
1761 case CMD_HARDWARE_ERR:
1762 printk(KERN_WARNING "cciss: cmd %p had "
1763 " hardware error\n", c);
1764 return_status = IO_ERROR;
1765 break;
1766 case CMD_CONNECTION_LOST:
1767 printk(KERN_WARNING "cciss: cmd %p had "
1768 "connection lost\n", c);
1769 return_status = IO_ERROR;
1770 break;
1771 case CMD_ABORTED:
1772 printk(KERN_WARNING "cciss: cmd %p was "
1773 "aborted\n", c);
1774 return_status = IO_ERROR;
1775 break;
1776 case CMD_ABORT_FAILED:
1777 printk(KERN_WARNING "cciss: cmd %p reports "
1778 "abort failed\n", c);
1779 return_status = IO_ERROR;
1780 break;
1781 case CMD_UNSOLICITED_ABORT:
1782 printk(KERN_WARNING
1783 "cciss%d: unsolicited abort %p\n",
1784 ctlr, c);
1785 if (c->retry_count < MAX_CMD_RETRIES) {
1786 printk(KERN_WARNING
1787 "cciss%d: retrying %p\n",
1788 ctlr, c);
1789 c->retry_count++;
1790 /* erase the old error information */
1791 memset(c->err_info, 0,
1792 sizeof(ErrorInfo_struct));
1793 return_status = IO_OK;
1794 INIT_COMPLETION(wait);
1795 goto resend_cmd2;
1796 }
1797 return_status = IO_ERROR;
1798 break;
1799 default:
1800 printk(KERN_WARNING "cciss: cmd %p returned "
1801 "unknown status %x\n", c,
1802 c->err_info->CommandStatus);
1803 return_status = IO_ERROR;
1804 }
1805 }
1806 /* unlock the buffers from DMA */
1807 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1808 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1809 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1810 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1811 cmd_free(h, c, 0);
1812 return(return_status);
1813
1814}
1815static void cciss_geometry_inquiry(int ctlr, int logvol,
1816 int withirq, unsigned int total_size,
1817 unsigned int block_size, InquiryData_struct *inq_buff,
1818 drive_info_struct *drv)
1819{
1820 int return_code;
1821 memset(inq_buff, 0, sizeof(InquiryData_struct));
1822 if (withirq)
1823 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1824 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1825 else
1826 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1827 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1828 if (return_code == IO_OK) {
1829 if(inq_buff->data_byte[8] == 0xFF) {
1830 printk(KERN_WARNING
1831 "cciss: reading geometry failed, volume "
1832 "does not support reading geometry\n");
1833 drv->block_size = block_size;
1834 drv->nr_blocks = total_size;
1835 drv->heads = 255;
1836 drv->sectors = 32; // Sectors per track
1837 drv->cylinders = total_size / 255 / 32;
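			/* The 255 heads / 32 sectors-per-track values above are a
			 * conventional fallback translation used when the volume
			 * cannot report its geometry. */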
1838 } else {
1839 unsigned int t;
1840
1841 drv->block_size = block_size;
1842 drv->nr_blocks = total_size;
1843 drv->heads = inq_buff->data_byte[6];
1844 drv->sectors = inq_buff->data_byte[7];
1845 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1846 drv->cylinders += inq_buff->data_byte[5];
1847 drv->raid_level = inq_buff->data_byte[8];
1848 t = drv->heads * drv->sectors;
1849 if (t > 1) {
1850 drv->cylinders = total_size/t;
1851 }
1852 }
1853 } else { /* Get geometry failed */
1854 printk(KERN_WARNING "cciss: reading geometry failed\n");
1855 }
1856 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1857 drv->heads, drv->sectors, drv->cylinders);
1858}
1859static void
1860cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1861 int withirq, unsigned int *total_size, unsigned int *block_size)
1862{
1863 int return_code;
1864 memset(buf, 0, sizeof(*buf));
1865 if (withirq)
1866 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1867 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1868 else
1869 return_code = sendcmd(CCISS_READ_CAPACITY,
1870 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1871 if (return_code == IO_OK) {
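		/* The returned total_size field holds the address of the last
		 * block (big-endian), so add one to get the block count. */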
1872 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1873 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1874 } else { /* read capacity command failed */
1875 printk(KERN_WARNING "cciss: read capacity failed\n");
1876 *total_size = 0;
1877 *block_size = BLOCK_SIZE;
1878 }
1879 printk(KERN_INFO " blocks= %u block_size= %d\n",
1880 *total_size, *block_size);
1881 return;
1882}
1883
1da177e4
LT
1884static int cciss_revalidate(struct gendisk *disk)
1885{
1886 ctlr_info_t *h = get_host(disk);
1887 drive_info_struct *drv = get_drv(disk);
1888 int logvol;
1889 int FOUND=0;
1890 unsigned int block_size;
1891 unsigned int total_size;
1892 ReadCapdata_struct *size_buff = NULL;
1893 InquiryData_struct *inq_buff = NULL;
1894
1895 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1896 {
1897 if(h->drv[logvol].LunID == drv->LunID) {
1898 FOUND=1;
1899 break;
1900 }
1901 }
1902
1903 if (!FOUND) return 1;
1904
1905 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1906 if (size_buff == NULL)
1907 {
1908 printk(KERN_WARNING "cciss: out of memory\n");
1909 return 1;
1910 }
1911 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1912 if (inq_buff == NULL)
1913 {
1914 printk(KERN_WARNING "cciss: out of memory\n");
1915 kfree(size_buff);
1916 return 1;
1917 }
1918
1919 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1920 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1921
ad2b9312 1922 blk_queue_hardsect_size(drv->queue, drv->block_size);
1da177e4
LT
1923 set_capacity(disk, drv->nr_blocks);
1924
1925 kfree(size_buff);
1926 kfree(inq_buff);
1927 return 0;
1928}
1929
1930/*
1931 * Poll for a command to complete.
1932 * The memory-mapped completion FIFO is polled for the result.
1933 * Used only at init time, while interrupts from the HBA are disabled.
1934 */
1935static unsigned long pollcomplete(int ctlr)
1936{
1937 unsigned long done;
1938 int i;
1939
1940 /* Wait (up to 20 seconds) for a command to complete */
1941
1942 for (i = 20 * HZ; i > 0; i--) {
1943 done = hba[ctlr]->access.command_completed(hba[ctlr]);
86e84862
NA
1944 if (done == FIFO_EMPTY)
1945 schedule_timeout_uninterruptible(1);
1946 else
1da177e4
LT
1947 return (done);
1948 }
1949 /* Invalid address to tell caller we ran out of time */
1950 return 1;
1951}
3da8b713 1952
1953static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1954{
1955 /* We get in here if sendcmd() is polling for completions
1956 and gets some command back that it wasn't expecting --
1957 something other than that which it just sent down.
1958 Ordinarily, that shouldn't happen, but it can happen when
1959 the scsi tape stuff gets into error handling mode, and
1960 starts using sendcmd() to try to abort commands and
1961 reset tape drives. In that case, sendcmd may pick up
1962 completions of commands that were sent to logical drives
1963 through the block i/o system, or cciss ioctls completing, etc.
1964 In that case, we need to save those completions for later
1965 processing by the interrupt handler.
1966 */
1967
1968#ifdef CONFIG_CISS_SCSI_TAPE
1969 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1970
1971 /* If it's not the scsi tape stuff doing error handling, (abort */
1972 /* or reset) then we don't expect anything weird. */
1973 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1974#endif
1975		printk( KERN_WARNING "cciss%d: SendCmd "
1976 "Invalid command list address returned! (%lx)\n",
1977 ctlr, complete);
1978 /* not much we can do. */
1979#ifdef CONFIG_CISS_SCSI_TAPE
1980 return 1;
1981 }
1982
1983 /* We've sent down an abort or reset, but something else
1984 has completed */
1985 if (srl->ncompletions >= (NR_CMDS + 2)) {
1986 /* Uh oh. No room to save it for later... */
1987 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1988 "reject list overflow, command lost!\n", ctlr);
1989 return 1;
1990 }
1991 /* Save it for later */
1992 srl->complete[srl->ncompletions] = complete;
1993 srl->ncompletions++;
1994#endif
1995 return 0;
1996}
1997
1da177e4
LT
1998/*
1999 * Send a command to the controller, and wait for it to complete.
2000 * Only used at init time.
2001 */
2002static int sendcmd(
2003 __u8 cmd,
2004 int ctlr,
2005 void *buff,
2006 size_t size,
2007 unsigned int use_unit_num, /* 0: address the controller,
2008 1: address logical volume log_unit,
2009 2: periph device address is scsi3addr */
2010 unsigned int log_unit,
2011 __u8 page_code,
2012 unsigned char *scsi3addr,
2013 int cmd_type)
2014{
2015 CommandList_struct *c;
2016 int i;
2017 unsigned long complete;
2018 ctlr_info_t *info_p= hba[ctlr];
2019 u64bit buff_dma_handle;
3da8b713 2020 int status, done = 0;
1da177e4
LT
2021
2022 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2023 printk(KERN_WARNING "cciss: unable to get memory");
2024 return(IO_ERROR);
2025 }
2026 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2027 log_unit, page_code, scsi3addr, cmd_type);
2028 if (status != IO_OK) {
2029 cmd_free(info_p, c, 1);
2030 return status;
2031 }
2032resend_cmd1:
2033 /*
2034 * Disable interrupt
2035 */
2036#ifdef CCISS_DEBUG
2037 printk(KERN_DEBUG "cciss: turning intr off\n");
2038#endif /* CCISS_DEBUG */
2039 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2040
2041 /* Make sure there is room in the command FIFO */
3da8b713 2042 /* Actually it should be completely empty at this time */
2043 /* unless we are in here doing error handling for the scsi */
2044 /* tape side of the driver. */
1da177e4
LT
2045 for (i = 200000; i > 0; i--)
2046 {
2047 /* if fifo isn't full go */
2048 if (!(info_p->access.fifo_full(info_p)))
2049 {
2050
2051 break;
2052 }
2053 udelay(10);
2054		printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
2055 " waiting!\n", ctlr);
2056 }
2057 /*
2058 * Send the cmd
2059 */
2060 info_p->access.submit_command(info_p, c);
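	/* Poll the completion FIFO for our own tag; a completion that does not
	 * match c->busaddr belongs to another command and is saved by
	 * add_sendcmd_reject() for the interrupt handler to process later. */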
3da8b713 2061 done = 0;
2062 do {
2063 complete = pollcomplete(ctlr);
1da177e4
LT
2064
2065#ifdef CCISS_DEBUG
3da8b713 2066 printk(KERN_DEBUG "cciss: command completed\n");
1da177e4
LT
2067#endif /* CCISS_DEBUG */
2068
3da8b713 2069 if (complete == 1) {
2070 printk( KERN_WARNING
2071 "cciss cciss%d: SendCmd Timeout out, "
2072 "No command list address returned!\n",
2073 ctlr);
2074 status = IO_ERROR;
2075 done = 1;
2076 break;
2077 }
2078
2079 /* This will need to change for direct lookup completions */
1da177e4
LT
2080 if ( (complete & CISS_ERROR_BIT)
2081 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2082 {
2083			/* if data overrun or underrun on Report command
2084 ignore it
2085 */
2086 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2087 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2088 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2089 ((c->err_info->CommandStatus ==
2090 CMD_DATA_OVERRUN) ||
2091 (c->err_info->CommandStatus ==
2092 CMD_DATA_UNDERRUN)
2093 ))
2094 {
2095 complete = c->busaddr;
2096 } else {
2097 if (c->err_info->CommandStatus ==
2098 CMD_UNSOLICITED_ABORT) {
2099 printk(KERN_WARNING "cciss%d: "
2100 "unsolicited abort %p\n",
2101 ctlr, c);
2102 if (c->retry_count < MAX_CMD_RETRIES) {
2103 printk(KERN_WARNING
2104 "cciss%d: retrying %p\n",
2105 ctlr, c);
2106 c->retry_count++;
2107 /* erase the old error */
2108 /* information */
2109 memset(c->err_info, 0,
2110 sizeof(ErrorInfo_struct));
2111 goto resend_cmd1;
2112 } else {
2113 printk(KERN_WARNING
2114 "cciss%d: retried %p too "
2115 "many times\n", ctlr, c);
2116 status = IO_ERROR;
2117 goto cleanup1;
2118 }
3da8b713 2119 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2120 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2121 status = IO_ERROR;
2122 goto cleanup1;
1da177e4
LT
2123 }
2124 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2125 " Error %x \n", ctlr,
2126 c->err_info->CommandStatus);
2127 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2128 " offensive info\n"
2129 " size %x\n num %x value %x\n", ctlr,
2130 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2131 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2132 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2133 status = IO_ERROR;
2134 goto cleanup1;
2135 }
2136 }
3da8b713 2137 /* This will need changing for direct lookup completions */
1da177e4 2138 if (complete != c->busaddr) {
3da8b713 2139 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2140 BUG(); /* we are pretty much hosed if we get here. */
2141 }
2142 continue;
2143 } else
2144 done = 1;
2145 } while (!done);
1da177e4
LT
2146
2147cleanup1:
2148 /* unlock the data buffer from DMA */
bb2a37bf
MM
2149 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2150 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1da177e4 2151 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
bb2a37bf 2152 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
3da8b713 2153#ifdef CONFIG_CISS_SCSI_TAPE
2154 /* if we saved some commands for later, process them now. */
2155 if (info_p->scsi_rejects.ncompletions > 0)
2156 do_cciss_intr(0, info_p, NULL);
2157#endif
1da177e4
LT
2158 cmd_free(info_p, c, 1);
2159 return (status);
2160}
2161/*
2162 * Map (physical) PCI mem into (virtual) kernel space
2163 */
2164static void __iomem *remap_pci_mem(ulong base, ulong size)
2165{
2166 ulong page_base = ((ulong) base) & PAGE_MASK;
2167 ulong page_offs = ((ulong) base) - page_base;
2168 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2169
2170 return page_remapped ? (page_remapped + page_offs) : NULL;
2171}
2172
2173/*
2174 * Takes jobs off the request Q and sends them to the hardware, then puts
2175 * them on the completion Q to wait for completion.
2176 */
2177static void start_io( ctlr_info_t *h)
2178{
2179 CommandList_struct *c;
2180
2181 while(( c = h->reqQ) != NULL )
2182 {
2183 /* can't do anything if fifo is full */
2184 if ((h->access.fifo_full(h))) {
2185 printk(KERN_WARNING "cciss: fifo full\n");
2186 break;
2187 }
2188
80682fa9 2189 /* Get the first entry from the Request Q */
1da177e4
LT
2190 removeQ(&(h->reqQ), c);
2191 h->Qdepth--;
2192
2193 /* Tell the controller execute command */
2194 h->access.submit_command(h, c);
2195
2196 /* Put job onto the completed Q */
2197 addQ (&(h->cmpQ), c);
2198 }
2199}
1da177e4
LT
2200/* Assumes that CCISS_LOCK(h->ctlr) is held. */
2201/* Zeros out the error record and then resends the command back */
2202/* to the controller */
2203static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2204{
2205 /* erase the old error information */
2206 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2207
2208 /* add it to software queue and then send it to the controller */
2209 addQ(&(h->reqQ),c);
2210 h->Qdepth++;
2211 if(h->Qdepth > h->maxQsinceinit)
2212 h->maxQsinceinit = h->Qdepth;
2213
2214 start_io(h);
2215}
a9925a06 2216
1da177e4 2217/* checks the status of the job and calls complete buffers to mark all
a9925a06
JA
2218 * buffers for the completed job. Note that this function does not need
2219 * to hold the hba/queue lock.
1da177e4
LT
2220 */
2221static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2222 int timeout)
2223{
2224 int status = 1;
1da177e4 2225 int retry_cmd = 0;
1da177e4
LT
2226
2227 if (timeout)
2228 status = 0;
2229
2230 if(cmd->err_info->CommandStatus != 0)
2231 { /* an error has occurred */
2232 switch(cmd->err_info->CommandStatus)
2233 {
2234 unsigned char sense_key;
2235 case CMD_TARGET_STATUS:
2236 status = 0;
2237
2238 if( cmd->err_info->ScsiStatus == 0x02)
2239 {
2240 printk(KERN_WARNING "cciss: cmd %p "
2241 "has CHECK CONDITION "
2242 " byte 2 = 0x%x\n", cmd,
2243 cmd->err_info->SenseInfo[2]
2244 );
2245 /* check the sense key */
2246 sense_key = 0xf &
2247 cmd->err_info->SenseInfo[2];
2248 /* no status or recovered error */
2249 if((sense_key == 0x0) ||
2250 (sense_key == 0x1))
2251 {
2252 status = 1;
2253 }
2254 } else
2255 {
2256 printk(KERN_WARNING "cciss: cmd %p "
2257 "has SCSI Status 0x%x\n",
2258 cmd, cmd->err_info->ScsiStatus);
2259 }
2260 break;
2261 case CMD_DATA_UNDERRUN:
2262 printk(KERN_WARNING "cciss: cmd %p has"
2263 " completed with data underrun "
2264 "reported\n", cmd);
2265 break;
2266 case CMD_DATA_OVERRUN:
2267 printk(KERN_WARNING "cciss: cmd %p has"
2268 " completed with data overrun "
2269 "reported\n", cmd);
2270 break;
2271 case CMD_INVALID:
2272 printk(KERN_WARNING "cciss: cmd %p is "
2273 "reported invalid\n", cmd);
2274 status = 0;
2275 break;
2276 case CMD_PROTOCOL_ERR:
2277 printk(KERN_WARNING "cciss: cmd %p has "
2278 "protocol error \n", cmd);
2279 status = 0;
2280 break;
2281 case CMD_HARDWARE_ERR:
2282 printk(KERN_WARNING "cciss: cmd %p had "
2283 " hardware error\n", cmd);
2284 status = 0;
2285 break;
2286 case CMD_CONNECTION_LOST:
2287 printk(KERN_WARNING "cciss: cmd %p had "
2288 "connection lost\n", cmd);
2289 status=0;
2290 break;
2291 case CMD_ABORTED:
2292 printk(KERN_WARNING "cciss: cmd %p was "
2293 "aborted\n", cmd);
2294 status=0;
2295 break;
2296 case CMD_ABORT_FAILED:
2297 printk(KERN_WARNING "cciss: cmd %p reports "
2298 "abort failed\n", cmd);
2299 status=0;
2300 break;
2301 case CMD_UNSOLICITED_ABORT:
2302 printk(KERN_WARNING "cciss%d: unsolicited "
2303 "abort %p\n", h->ctlr, cmd);
2304 if (cmd->retry_count < MAX_CMD_RETRIES) {
2305 retry_cmd=1;
2306 printk(KERN_WARNING
2307 "cciss%d: retrying %p\n",
2308 h->ctlr, cmd);
2309 cmd->retry_count++;
2310 } else
2311 printk(KERN_WARNING
2312 "cciss%d: %p retried too "
2313 "many times\n", h->ctlr, cmd);
2314 status=0;
2315 break;
2316 case CMD_TIMEOUT:
2317 printk(KERN_WARNING "cciss: cmd %p timedout\n",
2318 cmd);
2319 status=0;
2320 break;
2321 default:
2322 printk(KERN_WARNING "cciss: cmd %p returned "
2323 "unknown status %x\n", cmd,
2324 cmd->err_info->CommandStatus);
2325 status=0;
2326 }
2327 }
2328 /* We need to return this command */
2329 if(retry_cmd) {
2330 resend_cciss_cmd(h,cmd);
2331 return;
2332 }
1da177e4 2333
a9925a06
JA
2334 cmd->rq->completion_data = cmd;
2335 cmd->rq->errors = status;
2056a782 2336 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
a9925a06 2337 blk_complete_request(cmd->rq);
1da177e4
LT
2338}
2339
2340/*
2341 * Get a request and submit it to the controller.
2342 */
2343static void do_cciss_request(request_queue_t *q)
2344{
2345 ctlr_info_t *h= q->queuedata;
2346 CommandList_struct *c;
2347 int start_blk, seg;
2348 struct request *creq;
2349 u64bit temp64;
2350 struct scatterlist tmp_sg[MAXSGENTRIES];
2351 drive_info_struct *drv;
2352 int i, dir;
2353
2354 /* We call start_io here in case there is a command waiting on the
2355 * queue that has not been sent.
2356 */
2357 if (blk_queue_plugged(q))
2358 goto startio;
2359
2360queue:
2361 creq = elv_next_request(q);
2362 if (!creq)
2363 goto startio;
2364
089fe1b2 2365 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
1da177e4
LT
2366
2367 if (( c = cmd_alloc(h, 1)) == NULL)
2368 goto full;
2369
2370 blkdev_dequeue_request(creq);
2371
2372 spin_unlock_irq(q->queue_lock);
2373
2374 c->cmd_type = CMD_RWREQ;
2375 c->rq = creq;
2376
2377 /* fill in the request */
2378 drv = creq->rq_disk->private_data;
2379 c->Header.ReplyQueue = 0; // unused in simple mode
33079b21
MM
2380 /* got command from pool, so use the command block index instead */
2381 /* for direct lookups. */
2382 /* The first 2 bits are reserved for controller error reporting. */
2383 c->Header.Tag.lower = (c->cmdindex << 3);
2384 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
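	/* do_cciss_intr() reverses this encoding: bit 0x04 marks a direct-lookup
	 * tag and (tag >> 3) recovers the command pool index. */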
1da177e4
LT
2385 c->Header.LUN.LogDev.VolId= drv->LunID;
2386 c->Header.LUN.LogDev.Mode = 1;
2387 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2388 c->Request.Type.Type = TYPE_CMD; // It is a command.
2389 c->Request.Type.Attribute = ATTR_SIMPLE;
2390 c->Request.Type.Direction =
2391 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2392 c->Request.Timeout = 0; // Don't time out
2393 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2394 start_blk = creq->sector;
2395#ifdef CCISS_DEBUG
2396 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
2397 (int) creq->nr_sectors);
2398#endif /* CCISS_DEBUG */
2399
2400 seg = blk_rq_map_sg(q, creq, tmp_sg);
2401
2402 /* get the DMA records for the setup */
2403 if (c->Request.Type.Direction == XFER_READ)
2404 dir = PCI_DMA_FROMDEVICE;
2405 else
2406 dir = PCI_DMA_TODEVICE;
2407
2408 for (i=0; i<seg; i++)
2409 {
2410 c->SG[i].Len = tmp_sg[i].length;
2411 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2412 tmp_sg[i].offset, tmp_sg[i].length,
2413 dir);
2414 c->SG[i].Addr.lower = temp64.val32.lower;
2415 c->SG[i].Addr.upper = temp64.val32.upper;
2416 c->SG[i].Ext = 0; // we are not chaining
2417 }
2418 /* track how many SG entries we are using */
2419 if( seg > h->maxSG)
2420 h->maxSG = seg;
2421
2422#ifdef CCISS_DEBUG
2423 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2424#endif /* CCISS_DEBUG */
2425
2426 c->Header.SGList = c->Header.SGTotal = seg;
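	/* Fill in the rest of the 10-byte READ(10)/WRITE(10) CDB: bytes 2-5 hold
	 * the starting LBA and bytes 7-8 the sector count, both big-endian. */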
2427 c->Request.CDB[1]= 0;
2428 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2429 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2430 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2431 c->Request.CDB[5]= start_blk & 0xff;
2432 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2433 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2434 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2435 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2436
2437 spin_lock_irq(q->queue_lock);
2438
2439 addQ(&(h->reqQ),c);
2440 h->Qdepth++;
2441 if(h->Qdepth > h->maxQsinceinit)
2442 h->maxQsinceinit = h->Qdepth;
2443
2444 goto queue;
2445full:
2446 blk_stop_queue(q);
2447startio:
2448	/* We will already have the driver lock here, so there is no need
2449	 * to lock it.
2450 */
2451 start_io(h);
2452}
2453
3da8b713 2454static inline unsigned long get_next_completion(ctlr_info_t *h)
2455{
2456#ifdef CONFIG_CISS_SCSI_TAPE
2457 /* Any rejects from sendcmd() lying around? Process them first */
2458 if (h->scsi_rejects.ncompletions == 0)
2459 return h->access.command_completed(h);
2460 else {
2461 struct sendcmd_reject_list *srl;
2462 int n;
2463 srl = &h->scsi_rejects;
2464 n = --srl->ncompletions;
2465 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2466 printk("p");
2467 return srl->complete[n];
2468 }
2469#else
2470 return h->access.command_completed(h);
2471#endif
2472}
2473
2474static inline int interrupt_pending(ctlr_info_t *h)
2475{
2476#ifdef CONFIG_CISS_SCSI_TAPE
2477 return ( h->access.intr_pending(h)
2478 || (h->scsi_rejects.ncompletions > 0));
2479#else
2480 return h->access.intr_pending(h);
2481#endif
2482}
2483
2484static inline long interrupt_not_for_us(ctlr_info_t *h)
2485{
2486#ifdef CONFIG_CISS_SCSI_TAPE
2487 return (((h->access.intr_pending(h) == 0) ||
2488 (h->interrupts_enabled == 0))
2489 && (h->scsi_rejects.ncompletions == 0));
2490#else
2491 return (((h->access.intr_pending(h) == 0) ||
2492 (h->interrupts_enabled == 0)));
2493#endif
2494}
2495
1da177e4
LT
2496static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2497{
2498 ctlr_info_t *h = dev_id;
2499 CommandList_struct *c;
2500 unsigned long flags;
33079b21 2501 __u32 a, a1, a2;
1da177e4
LT
2502 int j;
2503 int start_queue = h->next_to_run;
2504
3da8b713 2505 if (interrupt_not_for_us(h))
1da177e4 2506 return IRQ_NONE;
1da177e4
LT
2507 /*
2508 * If there are completed commands in the completion queue,
2509 * we had better do something about it.
2510 */
2511 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
3da8b713 2512 while (interrupt_pending(h)) {
2513 while((a = get_next_completion(h)) != FIFO_EMPTY) {
1da177e4 2514 a1 = a;
33079b21
MM
2515 if ((a & 0x04)) {
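			/* Bit 0x04 means the tag was built for direct lookup by
			 * do_cciss_request(); the low three bits are dropped to
			 * recover the command pool index. */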
2516 a2 = (a >> 3);
2517 if (a2 >= NR_CMDS) {
2518 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2519 fail_all_cmds(h->ctlr);
2520 return IRQ_HANDLED;
2521 }
2522
2523 c = h->cmd_pool + a2;
2524 a = c->busaddr;
2525
2526 } else {
1da177e4 2527 a &= ~3;
33079b21
MM
2528 if ((c = h->cmpQ) == NULL) {
2529 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
1da177e4
LT
2530 continue;
2531 }
2532 while(c->busaddr != a) {
2533 c = c->next;
2534 if (c == h->cmpQ)
2535 break;
2536 }
33079b21 2537 }
1da177e4
LT
2538 /*
2539 * If we've found the command, take it off the
2540 * completion Q and free it
2541 */
2542 if (c->busaddr == a) {
2543 removeQ(&h->cmpQ, c);
2544 if (c->cmd_type == CMD_RWREQ) {
2545 complete_command(h, c, 0);
2546 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2547 complete(c->waiting);
2548 }
2549# ifdef CONFIG_CISS_SCSI_TAPE
2550 else if (c->cmd_type == CMD_SCSI)
2551 complete_scsi_command(c, 0, a1);
2552# endif
2553 continue;
2554 }
2555 }
2556 }
2557
2558 /* check to see if we have maxed out the number of commands that can
2559 * be placed on the queue. If so then exit. We do this check here
2560 * in case the interrupt we serviced was from an ioctl and did not
2561 * free any new commands.
2562 */
2563 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2564 goto cleanup;
2565
2566 /* We have room on the queue for more commands. Now we need to queue
2567 * them up. We will also keep track of the next queue to run so
2568 * that every queue gets a chance to be started first.
2569 */
ad2b9312
MM
2570 for (j=0; j < h->highest_lun + 1; j++){
2571 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
1da177e4
LT
2572 /* make sure the disk has been added and the drive is real
2573 * because this can be called from the middle of init_one.
2574 */
ad2b9312 2575 if(!(h->drv[curr_queue].queue) ||
1da177e4
LT
2576 !(h->drv[curr_queue].heads))
2577 continue;
2578 blk_start_queue(h->gendisk[curr_queue]->queue);
2579
2580 /* check to see if we have maxed out the number of commands
2581 * that can be placed on the queue.
2582 */
2583 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2584 {
2585 if (curr_queue == start_queue){
ad2b9312 2586 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
1da177e4
LT
2587 goto cleanup;
2588 } else {
2589 h->next_to_run = curr_queue;
2590 goto cleanup;
2591 }
2592 } else {
ad2b9312 2593 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1da177e4
LT
2594 }
2595 }
2596
2597cleanup:
2598 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2599 return IRQ_HANDLED;
2600}
1da177e4
LT
2601/*
2602 * We cannot read the structure directly; for portability we must use
2603 * the I/O functions.
2604 * This is for debug only.
2605 */
2606#ifdef CCISS_DEBUG
2607static void print_cfg_table( CfgTable_struct *tb)
2608{
2609 int i;
2610 char temp_name[17];
2611
2612 printk("Controller Configuration information\n");
2613 printk("------------------------------------\n");
2614 for(i=0;i<4;i++)
2615 temp_name[i] = readb(&(tb->Signature[i]));
2616 temp_name[4]='\0';
2617 printk(" Signature = %s\n", temp_name);
2618 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2619 printk(" Transport methods supported = 0x%x\n",
2620 readl(&(tb-> TransportSupport)));
2621 printk(" Transport methods active = 0x%x\n",
2622 readl(&(tb->TransportActive)));
2623 printk(" Requested transport Method = 0x%x\n",
2624 readl(&(tb->HostWrite.TransportRequest)));
2625 printk(" Coalese Interrupt Delay = 0x%x\n",
2626 readl(&(tb->HostWrite.CoalIntDelay)));
2627 printk(" Coalese Interrupt Count = 0x%x\n",
2628 readl(&(tb->HostWrite.CoalIntCount)));
2629 printk(" Max outstanding commands = 0x%d\n",
2630 readl(&(tb->CmdsOutMax)));
2631 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2632 for(i=0;i<16;i++)
2633 temp_name[i] = readb(&(tb->ServerName[i]));
2634 temp_name[16] = '\0';
2635 printk(" Server Name = %s\n", temp_name);
2636 printk(" Heartbeat Counter = 0x%x\n\n\n",
2637 readl(&(tb->HeartBeat)));
2638}
2639#endif /* CCISS_DEBUG */
2640
2641static void release_io_mem(ctlr_info_t *c)
2642{
2643 /* if IO mem was not protected do nothing */
2644 if( c->io_mem_addr == 0)
2645 return;
2646 release_region(c->io_mem_addr, c->io_mem_length);
2647 c->io_mem_addr = 0;
2648 c->io_mem_length = 0;
2649}
2650
2651static int find_PCI_BAR_index(struct pci_dev *pdev,
2652 unsigned long pci_bar_addr)
2653{
2654 int i, offset, mem_type, bar_type;
2655 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2656 return 0;
2657 offset = 0;
2658 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2659 bar_type = pci_resource_flags(pdev, i) &
2660 PCI_BASE_ADDRESS_SPACE;
2661 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2662 offset += 4;
2663 else {
2664 mem_type = pci_resource_flags(pdev, i) &
2665 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2666 switch (mem_type) {
2667 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2668 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2669 offset += 4; /* 32 bit */
2670 break;
2671 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2672 offset += 8;
2673 break;
2674 default: /* reserved in PCI 2.2 */
2675 printk(KERN_WARNING "Base address is invalid\n");
2676 return -1;
2677 break;
2678 }
2679 }
2680 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2681 return i+1;
2682 }
2683 return -1;
2684}
2685
fb86a35b
MM
2686/* If MSI/MSI-X is supported by the kernel we will try to enable it on
2687 * controllers that are capable. If not, we use IO-APIC mode.
2688 */
2689
2690static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
2691{
2692#ifdef CONFIG_PCI_MSI
2693 int err;
2694 struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1},
2695 {0,2}, {0,3}};
2696
2697 /* Some boards advertise MSI but don't really support it */
2698 if ((board_id == 0x40700E11) ||
2699 (board_id == 0x40800E11) ||
2700 (board_id == 0x40820E11) ||
2701 (board_id == 0x40830E11))
2702 goto default_int_mode;
2703
2704 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2705 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2706 if (!err) {
2707 c->intr[0] = cciss_msix_entries[0].vector;
2708 c->intr[1] = cciss_msix_entries[1].vector;
2709 c->intr[2] = cciss_msix_entries[2].vector;
2710 c->intr[3] = cciss_msix_entries[3].vector;
2711 c->msix_vector = 1;
2712 return;
2713 }
2714 if (err > 0) {
2715 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2716 "available\n", err);
2717 } else {
2718 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2719 err);
2720 }
2721 }
2722 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2723 if (!pci_enable_msi(pdev)) {
2724 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2725 c->msi_vector = 1;
2726 return;
2727 } else {
2728 printk(KERN_WARNING "cciss: MSI init failed\n");
2729 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2730 return;
2731 }
2732 }
89a7689e 2733default_int_mode:
fb86a35b
MM
2734#endif /* CONFIG_PCI_MSI */
2735 /* if we get here we're going to use the default interrupt mode */
fb86a35b
MM
2736 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2737 return;
2738}
2739
1da177e4
LT
2740static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2741{
2742 ushort subsystem_vendor_id, subsystem_device_id, command;
2743 __u32 board_id, scratchpad = 0;
2744 __u64 cfg_offset;
2745 __u32 cfg_base_addr;
2746 __u64 cfg_base_addr_index;
2747 int i;
2748
2749 /* check to see if controller has been disabled */
2750 /* BEFORE trying to enable it */
2751 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2752 if(!(command & 0x02))
2753 {
2754 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2755 return(-1);
2756 }
2757
2758 if (pci_enable_device(pdev))
2759 {
2760 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2761 return( -1);
2762 }
1da177e4
LT
2763
2764 subsystem_vendor_id = pdev->subsystem_vendor;
2765 subsystem_device_id = pdev->subsystem_device;
2766 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2767 subsystem_vendor_id);
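	/* board_id packs the subsystem device ID into the high 16 bits and the
	 * subsystem vendor ID into the low 16 bits, matching products[].board_id. */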
2768
2769 /* search for our IO range so we can protect it */
2770 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2771 {
2772 /* is this an IO range */
2773 if( pci_resource_flags(pdev, i) & 0x01 ) {
2774 c->io_mem_addr = pci_resource_start(pdev, i);
2775 c->io_mem_length = pci_resource_end(pdev, i) -
2776 pci_resource_start(pdev, i) +1;
2777#ifdef CCISS_DEBUG
2778 printk("IO value found base_addr[%d] %lx %lx\n", i,
2779 c->io_mem_addr, c->io_mem_length);
2780#endif /* CCISS_DEBUG */
2781 /* register the IO range */
2782 if(!request_region( c->io_mem_addr,
2783 c->io_mem_length, "cciss"))
2784 {
2785 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2786 c->io_mem_addr, c->io_mem_length);
2787 c->io_mem_addr= 0;
2788 c->io_mem_length = 0;
2789 }
2790 break;
2791 }
2792 }
2793
2794#ifdef CCISS_DEBUG
2795 printk("command = %x\n", command);
2796 printk("irq = %x\n", pdev->irq);
2797 printk("board_id = %x\n", board_id);
2798#endif /* CCISS_DEBUG */
2799
fb86a35b
MM
2800/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2801 * else we use the IO-APIC interrupt assigned to us by system ROM.
2802 */
2803 cciss_interrupt_mode(c, pdev, board_id);
1da177e4
LT
2804
2805 /*
2806	 * The memory base address is the first address; the second points to the
2807	 * config table.
2808 */
2809
2810 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2811#ifdef CCISS_DEBUG
2812 printk("address 0 = %x\n", c->paddr);
2813#endif /* CCISS_DEBUG */
2814 c->vaddr = remap_pci_mem(c->paddr, 200);
2815
2816 /* Wait for the board to become ready. (PCI hotplug needs this.)
2817 * We poll for up to 120 secs, once per 100ms. */
2818 for (i=0; i < 1200; i++) {
2819 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2820 if (scratchpad == CCISS_FIRMWARE_READY)
2821 break;
2822 set_current_state(TASK_INTERRUPTIBLE);
2823 schedule_timeout(HZ / 10); /* wait 100ms */
2824 }
2825 if (scratchpad != CCISS_FIRMWARE_READY) {
2826 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2827 return -1;
2828 }
2829
2830 /* get the address index number */
2831 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2832 cfg_base_addr &= (__u32) 0x0000ffff;
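	/* The low 16 bits are treated as a config-space BAR register offset;
	 * find_PCI_BAR_index() converts that offset into a resource index. */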
2833#ifdef CCISS_DEBUG
2834 printk("cfg base address = %x\n", cfg_base_addr);
2835#endif /* CCISS_DEBUG */
2836 cfg_base_addr_index =
2837 find_PCI_BAR_index(pdev, cfg_base_addr);
2838#ifdef CCISS_DEBUG
2839 printk("cfg base address index = %x\n", cfg_base_addr_index);
2840#endif /* CCISS_DEBUG */
2841 if (cfg_base_addr_index == -1) {
2842 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2843 release_io_mem(c);
2844 return -1;
2845 }
2846
2847 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2848#ifdef CCISS_DEBUG
2849 printk("cfg offset = %x\n", cfg_offset);
2850#endif /* CCISS_DEBUG */
2851 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2852 cfg_base_addr_index) + cfg_offset,
2853 sizeof(CfgTable_struct));
2854 c->board_id = board_id;
2855
2856#ifdef CCISS_DEBUG
945f390f 2857 print_cfg_table(c->cfgtable);
1da177e4
LT
2858#endif /* CCISS_DEBUG */
2859
2860 for(i=0; i<NR_PRODUCTS; i++) {
2861 if (board_id == products[i].board_id) {
2862 c->product_name = products[i].product_name;
2863 c->access = *(products[i].access);
2864 break;
2865 }
2866 }
2867 if (i == NR_PRODUCTS) {
2868 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2869 " to access the Smart Array controller %08lx\n",
2870 (unsigned long)board_id);
2871 return -1;
2872 }
2873 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2874 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2875 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2876 (readb(&c->cfgtable->Signature[3]) != 'S') )
2877 {
2878 printk("Does not appear to be a valid CISS config table\n");
2879 return -1;
2880 }
2881
2882#ifdef CONFIG_X86
2883{
2884 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2885 __u32 prefetch;
2886 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2887 prefetch |= 0x100;
2888 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2889}
2890#endif
2891
2892#ifdef CCISS_DEBUG
2893 printk("Trying to put board into Simple mode\n");
2894#endif /* CCISS_DEBUG */
2895 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2896 /* Update the field, and then ring the doorbell */
2897 writel( CFGTBL_Trans_Simple,
2898 &(c->cfgtable->HostWrite.TransportRequest));
2899 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2900
2901 /* under certain very rare conditions, this can take awhile.
2902 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2903 * as we enter this code.) */
2904 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2905 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2906 break;
2907 /* delay and try again */
2908 set_current_state(TASK_INTERRUPTIBLE);
2909 schedule_timeout(10);
2910 }
2911
2912#ifdef CCISS_DEBUG
2913 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2914#endif /* CCISS_DEBUG */
2915#ifdef CCISS_DEBUG
2916 print_cfg_table(c->cfgtable);
2917#endif /* CCISS_DEBUG */
2918
2919 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2920 {
2921 printk(KERN_WARNING "cciss: unable to get board into"
2922 " simple mode\n");
2923 return -1;
2924 }
2925 return 0;
2926
2927}
2928
2929/*
2930 * Gets information about the local volumes attached to the controller.
2931 */
2932static void cciss_getgeometry(int cntl_num)
2933{
2934 ReportLunData_struct *ld_buff;
2935 ReadCapdata_struct *size_buff;
2936 InquiryData_struct *inq_buff;
2937 int return_code;
2938 int i;
2939 int listlength = 0;
2940 __u32 lunid = 0;
2941 int block_size;
2942 int total_size;
2943
06ff37ff 2944 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1da177e4
LT
2945 if (ld_buff == NULL)
2946 {
2947 printk(KERN_ERR "cciss: out of memory\n");
2948 return;
2949 }
1da177e4
LT
2950 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2951 if (size_buff == NULL)
2952 {
2953 printk(KERN_ERR "cciss: out of memory\n");
2954 kfree(ld_buff);
2955 return;
2956 }
2957 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2958 if (inq_buff == NULL)
2959 {
2960 printk(KERN_ERR "cciss: out of memory\n");
2961 kfree(ld_buff);
2962 kfree(size_buff);
2963 return;
2964 }
2965 /* Get the firmware version */
2966 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2967 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2968 if (return_code == IO_OK)
2969 {
2970 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2971 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2972 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2973 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2974 } else /* send command failed */
2975 {
2976 printk(KERN_WARNING "cciss: unable to determine firmware"
2977 " version of controller\n");
2978 }
2979 /* Get the number of logical volumes */
2980 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2981 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2982
2983 if( return_code == IO_OK)
2984 {
2985#ifdef CCISS_DEBUG
2986 printk("LUN Data\n--------------------------\n");
2987#endif /* CCISS_DEBUG */
2988
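		/* LUNListLength is a big-endian byte count; each entry is 8 bytes,
		 * so the LUN count computed below is listlength / 8. */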
2989 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2990 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2991 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2992 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2993 } else /* reading number of logical volumes failed */
2994 {
2995 printk(KERN_WARNING "cciss: report logical volume"
2996 " command failed\n");
2997 listlength = 0;
2998 }
2999	hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3000 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
3001 {
3002 printk(KERN_ERR "ciss: only %d number of logical volumes supported\n",
3003 CISS_MAX_LUN);
3004 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3005 }
3006#ifdef CCISS_DEBUG
3007 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
3008 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
3009 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
3010#endif /* CCISS_DEBUG */
3011
3012 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
ddd47442
MM
3013// for(i=0; i< hba[cntl_num]->num_luns; i++)
3014 for(i=0; i < CISS_MAX_LUN; i++)
1da177e4 3015 {
ddd47442
MM
3016 if (i < hba[cntl_num]->num_luns){
3017 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3018 << 24;
3019 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3020 << 16;
3021 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3022 << 8;
1da177e4
LT
3023 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3024
3025 hba[cntl_num]->drv[i].LunID = lunid;
3026
3027
3028#ifdef CCISS_DEBUG
3029 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
ddd47442
MM
3030 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3031 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3032 hba[cntl_num]->drv[i].LunID);
1da177e4
LT
3033#endif /* CCISS_DEBUG */
3034 cciss_read_capacity(cntl_num, i, size_buff, 0,
3035 &total_size, &block_size);
ddd47442
MM
3036 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3037 block_size, inq_buff, &hba[cntl_num]->drv[i]);
3038 } else {
3039 /* initialize raid_level to indicate a free space */
3040 hba[cntl_num]->drv[i].raid_level = -1;
3041 }
1da177e4
LT
3042 }
3043 kfree(ld_buff);
3044 kfree(size_buff);
3045 kfree(inq_buff);
3046}
3047
3048/* Function to find the first free pointer into our hba[] array */
3049/* Returns -1 if no free entries are left. */
3050static int alloc_cciss_hba(void)
3051{
3052 struct gendisk *disk[NWD];
3053 int i, n;
3054 for (n = 0; n < NWD; n++) {
3055 disk[n] = alloc_disk(1 << NWD_SHIFT);
3056 if (!disk[n])
3057 goto out;
3058 }
3059
3060 for(i=0; i< MAX_CTLR; i++) {
3061 if (!hba[i]) {
3062 ctlr_info_t *p;
06ff37ff 3063 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
1da177e4
LT
3064 if (!p)
3065 goto Enomem;
1da177e4
LT
3066 for (n = 0; n < NWD; n++)
3067 p->gendisk[n] = disk[n];
3068 hba[i] = p;
3069 return i;
3070 }
3071 }
3072 printk(KERN_WARNING "cciss: This driver supports a maximum"
3073 " of %d controllers.\n", MAX_CTLR);
3074 goto out;
3075Enomem:
3076 printk(KERN_ERR "cciss: out of memory.\n");
3077out:
3078 while (n--)
3079 put_disk(disk[n]);
3080 return -1;
3081}
3082
3083static void free_hba(int i)
3084{
3085 ctlr_info_t *p = hba[i];
3086 int n;
3087
3088 hba[i] = NULL;
3089 for (n = 0; n < NWD; n++)
3090 put_disk(p->gendisk[n]);
3091 kfree(p);
3092}
3093
3094/*
3095 * This is it. Find all the controllers and register them. I really hate
3096 * stealing all these major device numbers.
3097 * returns the number of block devices registered.
3098 */
3099static int __devinit cciss_init_one(struct pci_dev *pdev,
3100 const struct pci_device_id *ent)
3101{
3102 request_queue_t *q;
3103 int i;
3104 int j;
3105 int rc;
3106
3107 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3108 " bus %d dev %d func %d\n",
3109 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3110 PCI_FUNC(pdev->devfn));
3111 i = alloc_cciss_hba();
3112 if(i < 0)
3113 return (-1);
1f8ef380
MM
3114
3115 hba[i]->busy_initializing = 1;
3116
1da177e4
LT
3117 if (cciss_pci_init(hba[i], pdev) != 0)
3118 goto clean1;
3119
3120 sprintf(hba[i]->devname, "cciss%d", i);
3121 hba[i]->ctlr = i;
3122 hba[i]->pdev = pdev;
3123
3124 /* configure PCI DMA stuff */
eb0df996 3125 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
1da177e4 3126 printk("cciss: using DAC cycles\n");
eb0df996 3127 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
1da177e4
LT
3128 printk("cciss: not using DAC cycles\n");
3129 else {
3130 printk("cciss: no suitable DMA available\n");
3131 goto clean1;
3132 }
3133
3134 /*
3135 * register with the major number, or get a dynamic major number
3136 * by passing 0 as argument. This is done for greater than
3137 * 8 controller support.
3138 */
3139 if (i < MAX_CTLR_ORIG)
564de74a 3140 hba[i]->major = COMPAQ_CISS_MAJOR + i;
1da177e4
LT
3141 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3142 if(rc == -EBUSY || rc == -EINVAL) {
3143 printk(KERN_ERR
3144 "cciss: Unable to get major number %d for %s "
3145 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3146 goto clean1;
3147 }
3148 else {
3149 if (i >= MAX_CTLR_ORIG)
3150 hba[i]->major = rc;
3151 }
3152
3153 /* make sure the board interrupts are off */
3154 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
fb86a35b 3155 if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
8bd0b97e 3156 SA_INTERRUPT | SA_SHIRQ, hba[i]->devname, hba[i])) {
1da177e4 3157 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
fb86a35b 3158 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
1da177e4
LT
3159 goto clean2;
3160 }
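	/* cmd_pool_bits is an allocation bitmap with one bit per command slot,
	 * rounded up to a whole number of unsigned longs. */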
3161 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3162 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3163 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3164 &(hba[i]->cmd_pool_dhandle));
3165 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3166 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3167 &(hba[i]->errinfo_pool_dhandle));
3168 if((hba[i]->cmd_pool_bits == NULL)
3169 || (hba[i]->cmd_pool == NULL)
3170 || (hba[i]->errinfo_pool == NULL)) {
3171 printk( KERN_ERR "cciss: out of memory");
3172 goto clean4;
3173 }
3da8b713 3174#ifdef CONFIG_CISS_SCSI_TAPE
3175 hba[i]->scsi_rejects.complete =
3176 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3177 (NR_CMDS + 5), GFP_KERNEL);
3178 if (hba[i]->scsi_rejects.complete == NULL) {
3179 printk( KERN_ERR "cciss: out of memory");
3180 goto clean4;
3181 }
3182#endif
1da177e4 3183 spin_lock_init(&hba[i]->lock);
1da177e4
LT
3184
3185 /* Initialize the pdev driver private data.
3186 have it point to hba[i]. */
3187 pci_set_drvdata(pdev, hba[i]);
3188 /* command and error info recs zeroed out before
3189 they are used */
3190 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3191
3192#ifdef CCISS_DEBUG
3193 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3194#endif /* CCISS_DEBUG */
3195
3196 cciss_getgeometry(i);
3197
3198 cciss_scsi_setup(i);
3199
3200 /* Turn the interrupts on so we can service requests */
3201 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3202
3203 cciss_procinit(i);
d6dbf42e 3204 hba[i]->busy_initializing = 0;
1da177e4 3205
ad2b9312
MM
3206 for(j=0; j < NWD; j++) { /* mfm */
3207 drive_info_struct *drv = &(hba[i]->drv[j]);
3208 struct gendisk *disk = hba[i]->gendisk[j];
3209
3210 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3211 if (!q) {
3212 printk(KERN_ERR
3213 "cciss: unable to allocate queue for disk %d\n",
3214 j);
3215 break;
3216 }
3217 drv->queue = q;
3218
3219 q->backing_dev_info.ra_pages = READ_AHEAD;
a9925a06
JA
3220 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3221
3222 /* This is a hardware imposed limit. */
3223 blk_queue_max_hw_segments(q, MAXSGENTRIES);
1da177e4 3224
a9925a06
JA
3225 /* This is a limit in the driver and could be eliminated. */
3226 blk_queue_max_phys_segments(q, MAXSGENTRIES);
1da177e4 3227
a9925a06 3228 blk_queue_max_sectors(q, 512);
1da177e4 3229
a9925a06 3230 blk_queue_softirq_done(q, cciss_softirq_done);
1da177e4 3231
ad2b9312 3232 q->queuedata = hba[i];
1da177e4
LT
3233 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3234 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3235 disk->major = hba[i]->major;
3236 disk->first_minor = j << NWD_SHIFT;
3237 disk->fops = &cciss_fops;
ad2b9312 3238 disk->queue = q;
1da177e4 3239 disk->private_data = drv;
27c0ff86 3240 disk->driverfs_dev = &pdev->dev;
1da177e4
LT
3241 /* we must register the controller even if no disks exist */
3242 /* this is for the online array utilities */
3243 if(!drv->heads && j)
3244 continue;
ad2b9312 3245 blk_queue_hardsect_size(q, drv->block_size);
1da177e4
LT
3246 set_capacity(disk, drv->nr_blocks);
3247 add_disk(disk);
3248 }
ad2b9312 3249
1da177e4
LT
3250 return(1);
3251
3252clean4:
3da8b713 3253#ifdef CONFIG_CISS_SCSI_TAPE
1acc0b0b 3254 kfree(hba[i]->scsi_rejects.complete);
3da8b713 3255#endif
6044ec88 3256 kfree(hba[i]->cmd_pool_bits);
1da177e4
LT
3257 if(hba[i]->cmd_pool)
3258 pci_free_consistent(hba[i]->pdev,
3259 NR_CMDS * sizeof(CommandList_struct),
3260 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3261 if(hba[i]->errinfo_pool)
3262 pci_free_consistent(hba[i]->pdev,
3263 NR_CMDS * sizeof( ErrorInfo_struct),
3264 hba[i]->errinfo_pool,
3265 hba[i]->errinfo_pool_dhandle);
fb86a35b 3266 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
1da177e4
LT
3267clean2:
3268 unregister_blkdev(hba[i]->major, hba[i]->devname);
3269clean1:
3270 release_io_mem(hba[i]);
1f8ef380 3271 hba[i]->busy_initializing = 0;
61808c2b 3272 free_hba(i);
1da177e4
LT
3273 return(-1);
3274}
3275
3276static void __devexit cciss_remove_one (struct pci_dev *pdev)
3277{
3278 ctlr_info_t *tmp_ptr;
3279 int i, j;
3280 char flush_buf[4];
3281 int return_code;
3282
3283 if (pci_get_drvdata(pdev) == NULL)
3284 {
3285 printk( KERN_ERR "cciss: Unable to remove device \n");
3286 return;
3287 }
3288 tmp_ptr = pci_get_drvdata(pdev);
3289 i = tmp_ptr->ctlr;
3290 if (hba[i] == NULL)
3291 {
3292 printk(KERN_ERR "cciss: device appears to "
3293 "already be removed \n");
3294 return;
3295 }
3296 /* Turn board interrupts off and send the flush cache command */
3297	/* sendcmd will turn off interrupts and send the flush
3298	 * to write all data in the battery-backed cache to disk */
3299 memset(flush_buf, 0, 4);
3300 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3301 TYPE_CMD);
3302 if(return_code != IO_OK)
3303 {
3304 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3305 i);
3306 }
fb86a35b
MM
3307 free_irq(hba[i]->intr[2], hba[i]);
3308
3309#ifdef CONFIG_PCI_MSI
3310 if (hba[i]->msix_vector)
3311 pci_disable_msix(hba[i]->pdev);
3312 else if (hba[i]->msi_vector)
3313 pci_disable_msi(hba[i]->pdev);
3314#endif /* CONFIG_PCI_MSI */
3315
1da177e4
LT
3316 pci_set_drvdata(pdev, NULL);
3317 iounmap(hba[i]->vaddr);
3318 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3319 unregister_blkdev(hba[i]->major, hba[i]->devname);
3320 remove_proc_entry(hba[i]->devname, proc_cciss);
3321
3322 /* remove it from the disk list */
3323 for (j = 0; j < NWD; j++) {
3324 struct gendisk *disk = hba[i]->gendisk[j];
6f5a0f7c 3325 if (disk) {
3326 request_queue_t *q = disk->queue;
3327
3328 if (disk->flags & GENHD_FL_UP)
3329 del_gendisk(disk);
3330 if (q)
3331 blk_cleanup_queue(q);
6a445d3b 3332 }
1da177e4
LT
3333 }
3334
1da177e4
LT
3335 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3336 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3337 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3338 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3339 kfree(hba[i]->cmd_pool_bits);
3da8b713 3340#ifdef CONFIG_CISS_SCSI_TAPE
3341 kfree(hba[i]->scsi_rejects.complete);
3342#endif
1da177e4
LT
3343 release_io_mem(hba[i]);
3344 free_hba(i);
3345}
3346
3347static struct pci_driver cciss_pci_driver = {
3348 .name = "cciss",
3349 .probe = cciss_init_one,
3350 .remove = __devexit_p(cciss_remove_one),
3351 .id_table = cciss_pci_device_id, /* id_table */
3352};
3353
3354/*
3355 * This is it. Register the PCI driver information for the cards we control;
3356 * the OS will call our registered routines when it finds one of our cards.
3357 */
3358static int __init cciss_init(void)
3359{
3360 printk(KERN_INFO DRIVER_NAME "\n");
3361
3362 /* Register for our PCI devices */
9bfab8ce 3363 return pci_register_driver(&cciss_pci_driver);
1da177e4
LT
3364}
3365
3366static void __exit cciss_cleanup(void)
3367{
3368 int i;
3369
3370 pci_unregister_driver(&cciss_pci_driver);
3371	/* double check that all controller entries have been removed */
3372 for (i=0; i< MAX_CTLR; i++)
3373 {
3374 if (hba[i] != NULL)
3375 {
3376 printk(KERN_WARNING "cciss: had to remove"
3377 " controller %d\n", i);
3378 cciss_remove_one(hba[i]->pdev);
3379 }
3380 }
3381 remove_proc_entry("cciss", proc_root_driver);
3382}
3383
33079b21
MM
3384static void fail_all_cmds(unsigned long ctlr)
3385{
3386 /* If we get here, the board is apparently dead. */
3387 ctlr_info_t *h = hba[ctlr];
3388 CommandList_struct *c;
3389 unsigned long flags;
3390
3391 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3392 h->alive = 0; /* the controller apparently died... */
3393
3394 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3395
3396 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3397
3398 /* move everything off the request queue onto the completed queue */
3399 while( (c = h->reqQ) != NULL ) {
3400 removeQ(&(h->reqQ), c);
3401 h->Qdepth--;
3402 addQ (&(h->cmpQ), c);
3403 }
3404
3405 /* Now, fail everything on the completed queue with a HW error */
3406 while( (c = h->cmpQ) != NULL ) {
3407 removeQ(&h->cmpQ, c);
3408 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3409 if (c->cmd_type == CMD_RWREQ) {
3410 complete_command(h, c, 0);
3411 } else if (c->cmd_type == CMD_IOCTL_PEND)
3412 complete(c->waiting);
3413#ifdef CONFIG_CISS_SCSI_TAPE
3414 else if (c->cmd_type == CMD_SCSI)
3415 complete_scsi_command(c, 0, 0);
3416#endif
3417 }
3418 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3419 return;
3420}
3421
1da177e4
LT
3422module_init(cciss_init);
3423module_exit(cciss_cleanup);