[PATCH] Add block_device_operations.getgeo block device method
drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2005 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 2.6.8)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8)
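/* e.g. DRIVER_VERSION == CCISS_DRIVER_VERSION(2,6,8) == 0x00020608 */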
52
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i");
58 MODULE_LICENSE("GPL");
59
60 #include "cciss_cmd.h"
61 #include "cciss.h"
62 #include <linux/cciss_ioctl.h>
63
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
67 0x0E11, 0x4070, 0, 0, 0},
68 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
69 0x0E11, 0x4080, 0, 0, 0},
70 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
71 0x0E11, 0x4082, 0, 0, 0},
72 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
73 0x0E11, 0x4083, 0, 0, 0},
74 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
75 0x0E11, 0x409A, 0, 0, 0},
76 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
77 0x0E11, 0x409B, 0, 0, 0},
78 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
79 0x0E11, 0x409C, 0, 0, 0},
80 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
81 0x0E11, 0x409D, 0, 0, 0},
82 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
83 0x0E11, 0x4091, 0, 0, 0},
84 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
85 0x103C, 0x3225, 0, 0, 0},
86 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
87 0x103c, 0x3223, 0, 0, 0},
88 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
89 0x103c, 0x3234, 0, 0, 0},
90 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
91 0x103c, 0x3235, 0, 0, 0},
92 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
93 0x103c, 0x3211, 0, 0, 0},
94 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
95 0x103c, 0x3212, 0, 0, 0},
96 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
97 0x103c, 0x3213, 0, 0, 0},
98 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
99 0x103c, 0x3214, 0, 0, 0},
100 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
101 0x103c, 0x3215, 0, 0, 0},
102 {0,}
103 };
104 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
105
106 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
107
108 /* board_id = Subsystem Device ID & Vendor ID
109 * product = Marketing Name for the board
110 * access = Address of the struct of function pointers
111 */
112 static struct board_type products[] = {
113 { 0x40700E11, "Smart Array 5300", &SA5_access },
114 { 0x40800E11, "Smart Array 5i", &SA5B_access},
115 { 0x40820E11, "Smart Array 532", &SA5B_access},
116 { 0x40830E11, "Smart Array 5312", &SA5B_access},
117 { 0x409A0E11, "Smart Array 641", &SA5_access},
118 { 0x409B0E11, "Smart Array 642", &SA5_access},
119 { 0x409C0E11, "Smart Array 6400", &SA5_access},
120 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
121 { 0x40910E11, "Smart Array 6i", &SA5_access},
122 { 0x3225103C, "Smart Array P600", &SA5_access},
123 { 0x3223103C, "Smart Array P800", &SA5_access},
124 { 0x3234103C, "Smart Array P400", &SA5_access},
125 { 0x3235103C, "Smart Array P400i", &SA5_access},
126 { 0x3211103C, "Smart Array E200i", &SA5_access},
127 { 0x3212103C, "Smart Array E200", &SA5_access},
128 { 0x3213103C, "Smart Array E200i", &SA5_access},
129 { 0x3214103C, "Smart Array E200i", &SA5_access},
130 { 0x3215103C, "Smart Array E200i", &SA5_access},
131 };
132
133 /* How long to wait (in milliseconds) for board to go into simple mode */
134 #define MAX_CONFIG_WAIT 30000
135 #define MAX_IOCTL_CONFIG_WAIT 1000
136
137 /* define how many times we will try a command because of bus resets */
138 #define MAX_CMD_RETRIES 3
139
140 #define READ_AHEAD 1024
141 #define NR_CMDS 384 /* #commands that can be outstanding */
142 #define MAX_CTLR 32
143
144 /* Originally the cciss driver only supported 8 major numbers */
145 #define MAX_CTLR_ORIG 8
146
147
148 static ctlr_info_t *hba[MAX_CTLR];
149
150 static void do_cciss_request(request_queue_t *q);
151 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
152 static int cciss_open(struct inode *inode, struct file *filep);
153 static int cciss_release(struct inode *inode, struct file *filep);
154 static int cciss_ioctl(struct inode *inode, struct file *filep,
155 unsigned int cmd, unsigned long arg);
156 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
157
158 static int revalidate_allvol(ctlr_info_t *host);
159 static int cciss_revalidate(struct gendisk *disk);
160 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
161 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
162
163 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
164 int withirq, unsigned int *total_size, unsigned int *block_size);
165 static void cciss_geometry_inquiry(int ctlr, int logvol,
166 int withirq, unsigned int total_size,
167 unsigned int block_size, InquiryData_struct *inq_buff,
168 drive_info_struct *drv);
169 static void cciss_getgeometry(int cntl_num);
170
171 static void start_io( ctlr_info_t *h);
172 static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
173 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
174 unsigned char *scsi3addr, int cmd_type);
175 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
176 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
177 int cmd_type);
178
179 static void fail_all_cmds(unsigned long ctlr);
180
181 #ifdef CONFIG_PROC_FS
182 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
183 int length, int *eof, void *data);
184 static void cciss_procinit(int i);
185 #else
186 static void cciss_procinit(int i) {}
187 #endif /* CONFIG_PROC_FS */
188
189 #ifdef CONFIG_COMPAT
190 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
191 #endif
192
193 static struct block_device_operations cciss_fops = {
194 .owner = THIS_MODULE,
195 .open = cciss_open,
196 .release = cciss_release,
197 .ioctl = cciss_ioctl,
198 .getgeo = cciss_getgeo,
199 #ifdef CONFIG_COMPAT
200 .compat_ioctl = cciss_compat_ioctl,
201 #endif
202 .revalidate_disk= cciss_revalidate,
203 };
204
205 /*
206 * Enqueuing and dequeuing functions for cmdlists.
207 */
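/*
 * The per-controller command queues are circular doubly-linked lists:
 * *Qptr points at the head and (*Qptr)->prev is the tail.  addQ()
 * appends c at the tail; removeQ() unlinks c, advances the head if c
 * was the head, and sets *Qptr to NULL when the last element is removed.
 */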
208 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
209 {
210 if (*Qptr == NULL) {
211 *Qptr = c;
212 c->next = c->prev = c;
213 } else {
214 c->prev = (*Qptr)->prev;
215 c->next = (*Qptr);
216 (*Qptr)->prev->next = c;
217 (*Qptr)->prev = c;
218 }
219 }
220
221 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
222 CommandList_struct *c)
223 {
224 if (c && c->next != c) {
225 if (*Qptr == c) *Qptr = c->next;
226 c->prev->next = c->next;
227 c->next->prev = c->prev;
228 } else {
229 *Qptr = NULL;
230 }
231 return c;
232 }
233
234 #include "cciss_scsi.c" /* For SCSI tape support */
235
236 #ifdef CONFIG_PROC_FS
237
238 /*
239 * Report information about this controller.
240 */
241 #define ENG_GIG 1000000000
242 #define ENG_GIG_FACTOR (ENG_GIG/512)
243 #define RAID_UNKNOWN 6
244 static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
245 "UNKNOWN"};
246
247 static struct proc_dir_entry *proc_cciss;
248
249 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
250 int length, int *eof, void *data)
251 {
252 off_t pos = 0;
253 off_t len = 0;
254 int size, i, ctlr;
255 ctlr_info_t *h = (ctlr_info_t*)data;
256 drive_info_struct *drv;
257 unsigned long flags;
258 sector_t vol_sz, vol_sz_frac;
259
260 ctlr = h->ctlr;
261
262 /* prevent displaying bogus info during configuration
263 * or deconfiguration of a logical volume
264 */
265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
266 if (h->busy_configuring) {
267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
268 return -EBUSY;
269 }
270 h->busy_configuring = 1;
271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
272
273 size = sprintf(buffer, "%s: HP %s Controller\n"
274 "Board ID: 0x%08lx\n"
275 "Firmware Version: %c%c%c%c\n"
276 "IRQ: %d\n"
277 "Logical drives: %d\n"
278 "Current Q depth: %d\n"
279 "Current # commands on controller: %d\n"
280 "Max Q depth since init: %d\n"
281 "Max # commands on controller since init: %d\n"
282 "Max SG entries since init: %d\n\n",
283 h->devname,
284 h->product_name,
285 (unsigned long)h->board_id,
286 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
287 (unsigned int)h->intr,
288 h->num_luns,
289 h->Qdepth, h->commands_outstanding,
290 h->maxQsinceinit, h->max_outstanding, h->maxSG);
291
292 pos += size; len += size;
293 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
294 for(i=0; i<=h->highest_lun; i++) {
295
296 drv = &h->drv[i];
297 if (drv->heads == 0)
298 continue;
299
300 vol_sz = drv->nr_blocks;
301 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
302 vol_sz_frac *= 100;
303 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
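/*
 * nr_blocks is in 512-byte sectors, so dividing by ENG_GIG_FACTOR
 * (10^9 / 512) above yields whole decimal gigabytes; the remainder,
 * scaled by 100 and divided again, supplies the two fractional digits
 * printed below (e.g. 71132000 blocks -> "36.41GB").
 */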
304
305 if (drv->raid_level > 5)
306 drv->raid_level = RAID_UNKNOWN;
307 size = sprintf(buffer+len, "cciss/c%dd%d:"
308 "\t%4u.%02uGB\tRAID %s\n",
309 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
310 raid_label[drv->raid_level]);
311 pos += size; len += size;
312 }
313
314 *eof = 1;
315 *start = buffer+offset;
316 len -= offset;
317 if (len>length)
318 len = length;
319 h->busy_configuring = 0;
320 return len;
321 }
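/*
 * Illustrative form of the per-volume lines built above, as seen in
 * /proc/driver/cciss/ccissN (values are examples only):
 *
 *	cciss/c0d0:	  36.41GB	RAID 5
 */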
322
323 static int
324 cciss_proc_write(struct file *file, const char __user *buffer,
325 unsigned long count, void *data)
326 {
327 unsigned char cmd[80];
328 int len;
329 #ifdef CONFIG_CISS_SCSI_TAPE
330 ctlr_info_t *h = (ctlr_info_t *) data;
331 int rc;
332 #endif
333
334 if (count > sizeof(cmd)-1) return -EINVAL;
335 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
336 cmd[count] = '\0';
337 len = strlen(cmd); // above 3 lines ensure safety
338 if (len && cmd[len-1] == '\n')
339 cmd[--len] = '\0';
340 # ifdef CONFIG_CISS_SCSI_TAPE
341 if (strcmp("engage scsi", cmd)==0) {
342 rc = cciss_engage_scsi(h->ctlr);
343 if (rc != 0) return -rc;
344 return count;
345 }
346 /* might be nice to have "disengage" too, but it's not
347 safely possible. (only 1 module use count, lock issues.) */
348 # endif
349 return -EINVAL;
350 }
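/*
 * Example use of the write handler above (assumes CONFIG_CISS_SCSI_TAPE):
 *
 *	echo "engage scsi" > /proc/driver/cciss/cciss0
 *
 * which activates SCSI tape support on controller 0.
 */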
351
352 /*
353 * Get us a file in /proc/cciss that says something about each controller.
354 * Create /proc/cciss if it doesn't exist yet.
355 */
356 static void __devinit cciss_procinit(int i)
357 {
358 struct proc_dir_entry *pde;
359
360 if (proc_cciss == NULL) {
361 proc_cciss = proc_mkdir("cciss", proc_root_driver);
362 if (!proc_cciss)
363 return;
364 }
365
366 pde = create_proc_read_entry(hba[i]->devname,
367 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
368 proc_cciss, cciss_proc_get_info, hba[i]);
369 pde->write_proc = cciss_proc_write;
370 }
371 #endif /* CONFIG_PROC_FS */
372
373 /*
374 * For operations that cannot sleep, a command block is allocated at init,
375 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
376 * which ones are free or in use. For operations that can wait for kmalloc
377 * to possibly sleep, this routine can be called with get_from_pool set to 0.
378 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
379 */
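/*
 * Usage sketch: the flag passed to cmd_free() must match the one given
 * to cmd_alloc().  The ioctl paths below, for example, do
 *
 *	c = cmd_alloc(host, 0);
 *	...
 *	cmd_free(host, c, 0);
 *
 * while paths that must not allocate draw from the preallocated pool
 * with get_from_pool/got_from_pool set to 1.
 */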
380 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
381 {
382 CommandList_struct *c;
383 int i;
384 u64bit temp64;
385 dma_addr_t cmd_dma_handle, err_dma_handle;
386
387 if (!get_from_pool)
388 {
389 c = (CommandList_struct *) pci_alloc_consistent(
390 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
391 if(c==NULL)
392 return NULL;
393 memset(c, 0, sizeof(CommandList_struct));
394
395 c->cmdindex = -1;
396
397 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
398 h->pdev, sizeof(ErrorInfo_struct),
399 &err_dma_handle);
400
401 if (c->err_info == NULL)
402 {
403 pci_free_consistent(h->pdev,
404 sizeof(CommandList_struct), c, cmd_dma_handle);
405 return NULL;
406 }
407 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
408 } else /* get it out of the controller's pool */
409 {
410 do {
411 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
412 if (i == NR_CMDS)
413 return NULL;
414 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
415 #ifdef CCISS_DEBUG
416 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
417 #endif
418 c = h->cmd_pool + i;
419 memset(c, 0, sizeof(CommandList_struct));
420 cmd_dma_handle = h->cmd_pool_dhandle
421 + i*sizeof(CommandList_struct);
422 c->err_info = h->errinfo_pool + i;
423 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
424 err_dma_handle = h->errinfo_pool_dhandle
425 + i*sizeof(ErrorInfo_struct);
426 h->nr_allocs++;
427
428 c->cmdindex = i;
429 }
430
431 c->busaddr = (__u32) cmd_dma_handle;
432 temp64.val = (__u64) err_dma_handle;
433 c->ErrDesc.Addr.lower = temp64.val32.lower;
434 c->ErrDesc.Addr.upper = temp64.val32.upper;
435 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
436
437 c->ctlr = h->ctlr;
438 return c;
439
440
441 }
442
443 /*
444 * Frees a command block that was previously allocated with cmd_alloc().
445 */
446 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
447 {
448 int i;
449 u64bit temp64;
450
451 if( !got_from_pool)
452 {
453 temp64.val32.lower = c->ErrDesc.Addr.lower;
454 temp64.val32.upper = c->ErrDesc.Addr.upper;
455 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
456 c->err_info, (dma_addr_t) temp64.val);
457 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
458 c, (dma_addr_t) c->busaddr);
459 } else
460 {
461 i = c - h->cmd_pool;
462 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
463 h->nr_frees++;
464 }
465 }
466
467 static inline ctlr_info_t *get_host(struct gendisk *disk)
468 {
469 return disk->queue->queuedata;
470 }
471
472 static inline drive_info_struct *get_drv(struct gendisk *disk)
473 {
474 return disk->private_data;
475 }
476
477 /*
478 * Open. Make sure the device is really there.
479 */
480 static int cciss_open(struct inode *inode, struct file *filep)
481 {
482 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
483 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
484
485 #ifdef CCISS_DEBUG
486 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
487 #endif /* CCISS_DEBUG */
488
489 if (host->busy_initializing || drv->busy_configuring)
490 return -EBUSY;
491 /*
492 * Root is allowed to open raw volume zero even if it's not configured
493 * so array config can still work. Root is also allowed to open any
494 * volume that has a LUN ID, so it can issue IOCTL to reread the
495 * disk information. I don't think I really like this
496 * but I'm already using way too many device nodes to claim another one
497 * for "raw controller".
498 */
499 if (drv->nr_blocks == 0) {
500 if (iminor(inode) != 0) { /* not node 0? */
501 /* if not node 0 make sure it is a partition = 0 */
502 if (iminor(inode) & 0x0f) {
503 return -ENXIO;
504 /* if it is, make sure we have a LUN ID */
505 } else if (drv->LunID == 0) {
506 return -ENXIO;
507 }
508 }
509 if (!capable(CAP_SYS_ADMIN))
510 return -EPERM;
511 }
512 drv->usage_count++;
513 host->usage_count++;
514 return 0;
515 }
516 /*
517 * Close. Sync first.
518 */
519 static int cciss_release(struct inode *inode, struct file *filep)
520 {
521 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
522 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
523
524 #ifdef CCISS_DEBUG
525 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
526 #endif /* CCISS_DEBUG */
527
528 drv->usage_count--;
529 host->usage_count--;
530 return 0;
531 }
532
533 #ifdef CONFIG_COMPAT
534
535 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
536 {
537 int ret;
538 lock_kernel();
539 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
540 unlock_kernel();
541 return ret;
542 }
543
544 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
545 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
546
547 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
548 {
549 switch (cmd) {
550 case CCISS_GETPCIINFO:
551 case CCISS_GETINTINFO:
552 case CCISS_SETINTINFO:
553 case CCISS_GETNODENAME:
554 case CCISS_SETNODENAME:
555 case CCISS_GETHEARTBEAT:
556 case CCISS_GETBUSTYPES:
557 case CCISS_GETFIRMVER:
558 case CCISS_GETDRIVVER:
559 case CCISS_REVALIDVOLS:
560 case CCISS_DEREGDISK:
561 case CCISS_REGNEWDISK:
562 case CCISS_REGNEWD:
563 case CCISS_RESCANDISK:
564 case CCISS_GETLUNINFO:
565 return do_ioctl(f, cmd, arg);
566
567 case CCISS_PASSTHRU32:
568 return cciss_ioctl32_passthru(f, cmd, arg);
569 case CCISS_BIG_PASSTHRU32:
570 return cciss_ioctl32_big_passthru(f, cmd, arg);
571
572 default:
573 return -ENOIOCTLCMD;
574 }
575 }
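/*
 * The two translators below repack the 32-bit user layouts into native
 * IOCTL_Command_struct / BIG_IOCTL_Command_struct copies placed in
 * compat_alloc_user_space(), convert the buffer pointer with
 * compat_ptr(), re-issue the native ioctl, and then copy error_info
 * back out to the 32-bit caller.
 */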
576
577 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
578 {
579 IOCTL32_Command_struct __user *arg32 =
580 (IOCTL32_Command_struct __user *) arg;
581 IOCTL_Command_struct arg64;
582 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
583 int err;
584 u32 cp;
585
586 err = 0;
587 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
588 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
589 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
590 err |= get_user(arg64.buf_size, &arg32->buf_size);
591 err |= get_user(cp, &arg32->buf);
592 arg64.buf = compat_ptr(cp);
593 err |= copy_to_user(p, &arg64, sizeof(arg64));
594
595 if (err)
596 return -EFAULT;
597
598 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
599 if (err)
600 return err;
601 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
602 if (err)
603 return -EFAULT;
604 return err;
605 }
606
607 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
608 {
609 BIG_IOCTL32_Command_struct __user *arg32 =
610 (BIG_IOCTL32_Command_struct __user *) arg;
611 BIG_IOCTL_Command_struct arg64;
612 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
613 int err;
614 u32 cp;
615
616 err = 0;
617 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
618 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
619 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
620 err |= get_user(arg64.buf_size, &arg32->buf_size);
621 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
622 err |= get_user(cp, &arg32->buf);
623 arg64.buf = compat_ptr(cp);
624 err |= copy_to_user(p, &arg64, sizeof(arg64));
625
626 if (err)
627 return -EFAULT;
628
629 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
630 if (err)
631 return err;
632 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
633 if (err)
634 return -EFAULT;
635 return err;
636 }
637 #endif
638
639 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
640 {
641 drive_info_struct *drv = get_drv(bdev->bd_disk);
642
643 if (!drv->cylinders)
644 return -ENXIO;
645
646 geo->heads = drv->heads;
647 geo->sectors = drv->sectors;
648 geo->cylinders = drv->cylinders;
649 return 0;
650 }
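/*
 * cciss_getgeo() is called by the block layer to answer HDIO_GETGEO
 * (via the new .getgeo method), so the driver needs no ioctl case of
 * its own for that; the values reported are the BIOS-style geometry
 * filled in by cciss_geometry_inquiry().
 */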
651
652 /*
653 * ioctl
654 */
655 static int cciss_ioctl(struct inode *inode, struct file *filep,
656 unsigned int cmd, unsigned long arg)
657 {
658 struct block_device *bdev = inode->i_bdev;
659 struct gendisk *disk = bdev->bd_disk;
660 ctlr_info_t *host = get_host(disk);
661 drive_info_struct *drv = get_drv(disk);
662 int ctlr = host->ctlr;
663 void __user *argp = (void __user *)arg;
664
665 #ifdef CCISS_DEBUG
666 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
667 #endif /* CCISS_DEBUG */
668
669 switch(cmd) {
670 case CCISS_GETPCIINFO:
671 {
672 cciss_pci_info_struct pciinfo;
673
674 if (!arg) return -EINVAL;
675 pciinfo.domain = pci_domain_nr(host->pdev->bus);
676 pciinfo.bus = host->pdev->bus->number;
677 pciinfo.dev_fn = host->pdev->devfn;
678 pciinfo.board_id = host->board_id;
679 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
680 return -EFAULT;
681 return(0);
682 }
683 case CCISS_GETINTINFO:
684 {
685 cciss_coalint_struct intinfo;
686 if (!arg) return -EINVAL;
687 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
688 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
689 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
690 return -EFAULT;
691 return(0);
692 }
693 case CCISS_SETINTINFO:
694 {
695 cciss_coalint_struct intinfo;
696 unsigned long flags;
697 int i;
698
699 if (!arg) return -EINVAL;
700 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
701 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
702 return -EFAULT;
703 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
704
705 {
706 // printk("cciss_ioctl: delay and count cannot be 0\n");
707 return( -EINVAL);
708 }
709 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
710 /* Update the field, and then ring the doorbell */
711 writel( intinfo.delay,
712 &(host->cfgtable->HostWrite.CoalIntDelay));
713 writel( intinfo.count,
714 &(host->cfgtable->HostWrite.CoalIntCount));
715 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
716
717 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
718 if (!(readl(host->vaddr + SA5_DOORBELL)
719 & CFGTBL_ChangeReq))
720 break;
721 /* delay and try again */
722 udelay(1000);
723 }
724 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
725 if (i >= MAX_IOCTL_CONFIG_WAIT)
726 return -EAGAIN;
727 return(0);
728 }
729 case CCISS_GETNODENAME:
730 {
731 NodeName_type NodeName;
732 int i;
733
734 if (!arg) return -EINVAL;
735 for(i=0;i<16;i++)
736 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
737 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
738 return -EFAULT;
739 return(0);
740 }
741 case CCISS_SETNODENAME:
742 {
743 NodeName_type NodeName;
744 unsigned long flags;
745 int i;
746
747 if (!arg) return -EINVAL;
748 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
749
750 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
751 return -EFAULT;
752
753 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
754
755 /* Update the field, and then ring the doorbell */
756 for(i=0;i<16;i++)
757 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
758
759 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
760
761 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
762 if (!(readl(host->vaddr + SA5_DOORBELL)
763 & CFGTBL_ChangeReq))
764 break;
765 /* delay and try again */
766 udelay(1000);
767 }
768 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
769 if (i >= MAX_IOCTL_CONFIG_WAIT)
770 return -EAGAIN;
771 return(0);
772 }
773
774 case CCISS_GETHEARTBEAT:
775 {
776 Heartbeat_type heartbeat;
777
778 if (!arg) return -EINVAL;
779 heartbeat = readl(&host->cfgtable->HeartBeat);
780 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
781 return -EFAULT;
782 return(0);
783 }
784 case CCISS_GETBUSTYPES:
785 {
786 BusTypes_type BusTypes;
787
788 if (!arg) return -EINVAL;
789 BusTypes = readl(&host->cfgtable->BusTypes);
790 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
791 return -EFAULT;
792 return(0);
793 }
794 case CCISS_GETFIRMVER:
795 {
796 FirmwareVer_type firmware;
797
798 if (!arg) return -EINVAL;
799 memcpy(firmware, host->firm_ver, 4);
800
801 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
802 return -EFAULT;
803 return(0);
804 }
805 case CCISS_GETDRIVVER:
806 {
807 DriverVer_type DriverVer = DRIVER_VERSION;
808
809 if (!arg) return -EINVAL;
810
811 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
812 return -EFAULT;
813 return(0);
814 }
815
816 case CCISS_REVALIDVOLS:
817 if (bdev != bdev->bd_contains || drv != host->drv)
818 return -ENXIO;
819 return revalidate_allvol(host);
820
821 case CCISS_GETLUNINFO: {
822 LogvolInfo_struct luninfo;
823
824 luninfo.LunID = drv->LunID;
825 luninfo.num_opens = drv->usage_count;
826 luninfo.num_parts = 0;
827 if (copy_to_user(argp, &luninfo,
828 sizeof(LogvolInfo_struct)))
829 return -EFAULT;
830 return(0);
831 }
832 case CCISS_DEREGDISK:
833 return rebuild_lun_table(host, disk);
834
835 case CCISS_REGNEWD:
836 return rebuild_lun_table(host, NULL);
837
838 case CCISS_PASSTHRU:
839 {
840 IOCTL_Command_struct iocommand;
841 CommandList_struct *c;
842 char *buff = NULL;
843 u64bit temp64;
844 unsigned long flags;
845 DECLARE_COMPLETION(wait);
846
847 if (!arg) return -EINVAL;
848
849 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
850
851 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
852 return -EFAULT;
853 if((iocommand.buf_size < 1) &&
854 (iocommand.Request.Type.Direction != XFER_NONE))
855 {
856 return -EINVAL;
857 }
858 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
859 /* Check kmalloc limits */
860 if(iocommand.buf_size > 128000)
861 return -EINVAL;
862 #endif
863 if(iocommand.buf_size > 0)
864 {
865 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
866 if( buff == NULL)
867 return -ENOMEM;
868 }
869 if (iocommand.Request.Type.Direction == XFER_WRITE)
870 {
871 /* Copy the data into the buffer we created */
872 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
873 {
874 kfree(buff);
875 return -EFAULT;
876 }
877 } else {
878 memset(buff, 0, iocommand.buf_size);
879 }
880 if ((c = cmd_alloc(host , 0)) == NULL)
881 {
882 kfree(buff);
883 return -ENOMEM;
884 }
885 // Fill in the command type
886 c->cmd_type = CMD_IOCTL_PEND;
887 // Fill in Command Header
888 c->Header.ReplyQueue = 0; // unused in simple mode
889 if( iocommand.buf_size > 0) // buffer to fill
890 {
891 c->Header.SGList = 1;
892 c->Header.SGTotal= 1;
893 } else // no buffers to fill
894 {
895 c->Header.SGList = 0;
896 c->Header.SGTotal= 0;
897 }
898 c->Header.LUN = iocommand.LUN_info;
899 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
900
901 // Fill in Request block
902 c->Request = iocommand.Request;
903
904 // Fill in the scatter gather information
905 if (iocommand.buf_size > 0 )
906 {
907 temp64.val = pci_map_single( host->pdev, buff,
908 iocommand.buf_size,
909 PCI_DMA_BIDIRECTIONAL);
910 c->SG[0].Addr.lower = temp64.val32.lower;
911 c->SG[0].Addr.upper = temp64.val32.upper;
912 c->SG[0].Len = iocommand.buf_size;
913 c->SG[0].Ext = 0; // we are not chaining
914 }
915 c->waiting = &wait;
916
917 /* Put the request on the tail of the request queue */
918 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
919 addQ(&host->reqQ, c);
920 host->Qdepth++;
921 start_io(host);
922 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
923
924 wait_for_completion(&wait);
925
926 /* unlock the buffers from DMA */
927 temp64.val32.lower = c->SG[0].Addr.lower;
928 temp64.val32.upper = c->SG[0].Addr.upper;
929 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
930 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
931
932 /* Copy the error information out */
933 iocommand.error_info = *(c->err_info);
934 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
935 {
936 kfree(buff);
937 cmd_free(host, c, 0);
938 return( -EFAULT);
939 }
940
941 if (iocommand.Request.Type.Direction == XFER_READ)
942 {
943 /* Copy the data out of the buffer we created */
944 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
945 {
946 kfree(buff);
947 cmd_free(host, c, 0);
948 return -EFAULT;
949 }
950 }
951 kfree(buff);
952 cmd_free(host, c, 0);
953 return(0);
954 }
955 case CCISS_BIG_PASSTHRU: {
956 BIG_IOCTL_Command_struct *ioc;
957 CommandList_struct *c;
958 unsigned char **buff = NULL;
959 int *buff_size = NULL;
960 u64bit temp64;
961 unsigned long flags;
962 BYTE sg_used = 0;
963 int status = 0;
964 int i;
965 DECLARE_COMPLETION(wait);
966 __u32 left;
967 __u32 sz;
968 BYTE __user *data_ptr;
969
970 if (!arg)
971 return -EINVAL;
972 if (!capable(CAP_SYS_RAWIO))
973 return -EPERM;
974 ioc = (BIG_IOCTL_Command_struct *)
975 kmalloc(sizeof(*ioc), GFP_KERNEL);
976 if (!ioc) {
977 status = -ENOMEM;
978 goto cleanup1;
979 }
980 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
981 status = -EFAULT;
982 goto cleanup1;
983 }
984 if ((ioc->buf_size < 1) &&
985 (ioc->Request.Type.Direction != XFER_NONE)) {
986 status = -EINVAL;
987 goto cleanup1;
988 }
989 /* Check kmalloc limits using all SGs */
990 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
991 status = -EINVAL;
992 goto cleanup1;
993 }
994 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
995 status = -EINVAL;
996 goto cleanup1;
997 }
998 buff = (unsigned char **) kmalloc(MAXSGENTRIES *
999 sizeof(char *), GFP_KERNEL);
1000 if (!buff) {
1001 status = -ENOMEM;
1002 goto cleanup1;
1003 }
1004 memset(buff, 0, MAXSGENTRIES * sizeof(char *));
1005 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1006 GFP_KERNEL);
1007 if (!buff_size) {
1008 status = -ENOMEM;
1009 goto cleanup1;
1010 }
1011 left = ioc->buf_size;
1012 data_ptr = ioc->buf;
1013 while (left) {
1014 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1015 buff_size[sg_used] = sz;
1016 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1017 if (buff[sg_used] == NULL) {
1018 status = -ENOMEM;
1019 goto cleanup1;
1020 }
1021 if (ioc->Request.Type.Direction == XFER_WRITE) {
1022 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1023 status = -ENOMEM;
1024 goto cleanup1;
1025 }
1026 } else {
1027 memset(buff[sg_used], 0, sz);
1028 }
1029 left -= sz;
1030 data_ptr += sz;
1031 sg_used++;
1032 }
1033 if ((c = cmd_alloc(host , 0)) == NULL) {
1034 status = -ENOMEM;
1035 goto cleanup1;
1036 }
1037 c->cmd_type = CMD_IOCTL_PEND;
1038 c->Header.ReplyQueue = 0;
1039
1040 if( ioc->buf_size > 0) {
1041 c->Header.SGList = sg_used;
1042 c->Header.SGTotal= sg_used;
1043 } else {
1044 c->Header.SGList = 0;
1045 c->Header.SGTotal= 0;
1046 }
1047 c->Header.LUN = ioc->LUN_info;
1048 c->Header.Tag.lower = c->busaddr;
1049
1050 c->Request = ioc->Request;
1051 if (ioc->buf_size > 0 ) {
1052 int i;
1053 for(i=0; i<sg_used; i++) {
1054 temp64.val = pci_map_single( host->pdev, buff[i],
1055 buff_size[i],
1056 PCI_DMA_BIDIRECTIONAL);
1057 c->SG[i].Addr.lower = temp64.val32.lower;
1058 c->SG[i].Addr.upper = temp64.val32.upper;
1059 c->SG[i].Len = buff_size[i];
1060 c->SG[i].Ext = 0; /* we are not chaining */
1061 }
1062 }
1063 c->waiting = &wait;
1064 /* Put the request on the tail of the request queue */
1065 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1066 addQ(&host->reqQ, c);
1067 host->Qdepth++;
1068 start_io(host);
1069 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1070 wait_for_completion(&wait);
1071 /* unlock the buffers from DMA */
1072 for(i=0; i<sg_used; i++) {
1073 temp64.val32.lower = c->SG[i].Addr.lower;
1074 temp64.val32.upper = c->SG[i].Addr.upper;
1075 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1076 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1077 }
1078 /* Copy the error information out */
1079 ioc->error_info = *(c->err_info);
1080 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1081 cmd_free(host, c, 0);
1082 status = -EFAULT;
1083 goto cleanup1;
1084 }
1085 if (ioc->Request.Type.Direction == XFER_READ) {
1086 /* Copy the data out of the buffer we created */
1087 BYTE __user *ptr = ioc->buf;
1088 for(i=0; i< sg_used; i++) {
1089 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1090 cmd_free(host, c, 0);
1091 status = -EFAULT;
1092 goto cleanup1;
1093 }
1094 ptr += buff_size[i];
1095 }
1096 }
1097 cmd_free(host, c, 0);
1098 status = 0;
1099 cleanup1:
1100 if (buff) {
1101 for(i=0; i<sg_used; i++)
1102 kfree(buff[i]);
1103 kfree(buff);
1104 }
1105 kfree(buff_size);
1106 kfree(ioc);
1107 return(status);
1108 }
1109 default:
1110 return -ENOTTY;
1111 }
1112
1113 }
1114
1115 /*
1116 * revalidate_allvol is for online array config utilities. After a
1117 * utility reconfigures the drives in the array, it can use this function
1118 * (through an ioctl) to make the driver zap any previous disk structs for
1119 * that controller and get new ones.
1120 *
1121 * Right now I'm using the getgeometry() function to do this, but this
1122 * function should probably be finer grained and allow you to revalidate one
1123 * particular logical volume (instead of all of them on a particular
1124 * controller).
1125 */
1126 static int revalidate_allvol(ctlr_info_t *host)
1127 {
1128 int ctlr = host->ctlr, i;
1129 unsigned long flags;
1130
1131 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1132 if (host->usage_count > 1) {
1133 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1134 printk(KERN_WARNING "cciss: Device busy for volume"
1135 " revalidation (usage=%d)\n", host->usage_count);
1136 return -EBUSY;
1137 }
1138 host->usage_count++;
1139 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1140
1141 for(i=0; i< NWD; i++) {
1142 struct gendisk *disk = host->gendisk[i];
1143 if (disk) {
1144 request_queue_t *q = disk->queue;
1145
1146 if (disk->flags & GENHD_FL_UP)
1147 del_gendisk(disk);
1148 if (q)
1149 blk_cleanup_queue(q);
1150 }
1151 }
1152
1153 /*
1154 * Set the partition and block size structures for all volumes
1155 * on this controller to zero. We will reread all of this data
1156 */
1157 memset(host->drv, 0, sizeof(drive_info_struct)
1158 * CISS_MAX_LUN);
1159 /*
1160 * Tell the array controller not to give us any interrupts while
1161 * we check the new geometry. Then turn interrupts back on when
1162 * we're done.
1163 */
1164 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1165 cciss_getgeometry(ctlr);
1166 host->access.set_intr_mask(host, CCISS_INTR_ON);
1167
1168 /* Loop through each real device */
1169 for (i = 0; i < NWD; i++) {
1170 struct gendisk *disk = host->gendisk[i];
1171 drive_info_struct *drv = &(host->drv[i]);
1172 /* we must register the controller even if no disks exist */
1173 /* this is for the online array utilities */
1174 if (!drv->heads && i)
1175 continue;
1176 blk_queue_hardsect_size(drv->queue, drv->block_size);
1177 set_capacity(disk, drv->nr_blocks);
1178 add_disk(disk);
1179 }
1180 host->usage_count--;
1181 return 0;
1182 }
1183
1184 /* This function will check the usage_count of the drive to be updated/added.
1185 * If the usage_count is zero then the drive information will be updated and
1186 * the disk will be re-registered with the kernel. If not then it will be
1187 * left alone for the next reboot. The exception to this is disk 0 which
1188 * will always be left registered with the kernel since it is also the
1189 * controller node. Any changes to disk 0 will show up on the next
1190 * reboot.
1191 */
1192 static void cciss_update_drive_info(int ctlr, int drv_index)
1193 {
1194 ctlr_info_t *h = hba[ctlr];
1195 struct gendisk *disk;
1196 ReadCapdata_struct *size_buff = NULL;
1197 InquiryData_struct *inq_buff = NULL;
1198 unsigned int block_size;
1199 unsigned int total_size;
1200 unsigned long flags = 0;
1201 int ret = 0;
1202
1203 /* if the disk already exists then deregister it before proceeding*/
1204 if (h->drv[drv_index].raid_level != -1){
1205 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1206 h->drv[drv_index].busy_configuring = 1;
1207 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1208 ret = deregister_disk(h->gendisk[drv_index],
1209 &h->drv[drv_index], 0);
1210 h->drv[drv_index].busy_configuring = 0;
1211 }
1212
1213 /* If the disk is in use return */
1214 if (ret)
1215 return;
1216
1217
1218 /* Get information about the disk and modify the driver structure */
1219 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1220 if (size_buff == NULL)
1221 goto mem_msg;
1222 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1223 if (inq_buff == NULL)
1224 goto mem_msg;
1225
1226 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1227 &total_size, &block_size);
1228 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1229 inq_buff, &h->drv[drv_index]);
1230
1231 ++h->num_luns;
1232 disk = h->gendisk[drv_index];
1233 set_capacity(disk, h->drv[drv_index].nr_blocks);
1234
1235
1236 /* if it's the controller it's already added */
1237 if (drv_index){
1238 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1239
1240 /* Set up queue information */
1241 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1242 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1243
1244 /* This is a hardware imposed limit. */
1245 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1246
1247 /* This is a limit in the driver and could be eliminated. */
1248 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1249
1250 blk_queue_max_sectors(disk->queue, 512);
1251
1252 disk->queue->queuedata = hba[ctlr];
1253
1254 blk_queue_hardsect_size(disk->queue,
1255 hba[ctlr]->drv[drv_index].block_size);
1256
1257 h->drv[drv_index].queue = disk->queue;
1258 add_disk(disk);
1259 }
1260
1261 freeret:
1262 kfree(size_buff);
1263 kfree(inq_buff);
1264 return;
1265 mem_msg:
1266 printk(KERN_ERR "cciss: out of memory\n");
1267 goto freeret;
1268 }
1269
1270 /* This function will find the first index of the controller's drive array
1271 * that has a -1 for the raid_level and will return that index. This is
1272 * where new drives will be added. If the index to be returned is greater
1273 * than the highest_lun index for the controller then highest_lun is set
1274 * to this new index. If there are no available indexes then -1 is returned.
1275 */
1276 static int cciss_find_free_drive_index(int ctlr)
1277 {
1278 int i;
1279
1280 for (i=0; i < CISS_MAX_LUN; i++){
1281 if (hba[ctlr]->drv[i].raid_level == -1){
1282 if (i > hba[ctlr]->highest_lun)
1283 hba[ctlr]->highest_lun = i;
1284 return i;
1285 }
1286 }
1287 return -1;
1288 }
1289
1290 /* This function will add and remove logical drives from the Logical
1291 * drive array of the controller and maintain persistence of ordering
1292 * so that mount points are preserved until the next reboot. This allows
1293 * for the removal of logical drives in the middle of the drive array
1294 * without a re-ordering of those drives.
1295 * INPUT
1296 * h = The controller to perform the operations on
1297 * del_disk = The disk to remove if specified. If the value given
1298 * is NULL then no disk is removed.
1299 */
1300 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1301 {
1302 int ctlr = h->ctlr;
1303 int num_luns;
1304 ReportLunData_struct *ld_buff = NULL;
1305 drive_info_struct *drv = NULL;
1306 int return_code;
1307 int listlength = 0;
1308 int i;
1309 int drv_found;
1310 int drv_index = 0;
1311 __u32 lunid = 0;
1312 unsigned long flags;
1313
1314 /* Set busy_configuring flag for this operation */
1315 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1316 if (h->num_luns >= CISS_MAX_LUN){
1317 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1318 return -EINVAL;
1319 }
1320
1321 if (h->busy_configuring){
1322 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1323 return -EBUSY;
1324 }
1325 h->busy_configuring = 1;
1326
1327 /* if del_disk is NULL then we are being called to add a new disk
1328 * and update the logical drive table. If it is not NULL then
1329 * we will check if the disk is in use or not.
1330 */
1331 if (del_disk != NULL){
1332 drv = get_drv(del_disk);
1333 drv->busy_configuring = 1;
1334 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1335 return_code = deregister_disk(del_disk, drv, 1);
1336 drv->busy_configuring = 0;
1337 h->busy_configuring = 0;
1338 return return_code;
1339 } else {
1340 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1341 if (!capable(CAP_SYS_RAWIO))
1342 return -EPERM;
1343
1344 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1345 if (ld_buff == NULL)
1346 goto mem_msg;
1347
1348 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1349 sizeof(ReportLunData_struct), 0, 0, 0,
1350 TYPE_CMD);
1351
1352 if (return_code == IO_OK){
1353 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1354 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1355 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1356 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
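/* LUNListLength is big-endian; the shifts above rebuild the 32-bit
 * byte count that determines how many 8-byte LUN entries follow. */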
1357 } else{ /* reading number of logical volumes failed */
1358 printk(KERN_WARNING "cciss: report logical volume"
1359 " command failed\n");
1360 listlength = 0;
1361 goto freeret;
1362 }
1363
1364 num_luns = listlength / 8; /* 8 bytes per entry */
1365 if (num_luns > CISS_MAX_LUN){
1366 num_luns = CISS_MAX_LUN;
1367 printk(KERN_WARNING "cciss: more luns configured"
1368 " on controller than can be handled by"
1369 " this driver.\n");
1370 }
1371
1372 /* Compare controller drive array to the driver's drive array.
1373 * Check for updates in the drive information and any new drives
1374 * on the controller.
1375 */
1376 for (i=0; i < num_luns; i++){
1377 int j;
1378
1379 drv_found = 0;
1380
1381 lunid = (0xff &
1382 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1383 lunid |= (0xff &
1384 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1385 lunid |= (0xff &
1386 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1387 lunid |= 0xff &
1388 (unsigned int)(ld_buff->LUN[i][0]);
1389
1390 /* Find if the LUN is already in the drive array
1391 * of the controller. If so then update its info
1392 * if not in use. If it does not exist then find
1393 * the first free index and add it.
1394 */
1395 for (j=0; j <= h->highest_lun; j++){
1396 if (h->drv[j].LunID == lunid){
1397 drv_index = j;
1398 drv_found = 1;
1399 }
1400 }
1401
1402 /* check if the drive was found already in the array */
1403 if (!drv_found){
1404 drv_index = cciss_find_free_drive_index(ctlr);
1405 if (drv_index == -1)
1406 goto freeret;
1407
1408 }
1409 h->drv[drv_index].LunID = lunid;
1410 cciss_update_drive_info(ctlr, drv_index);
1411 } /* end for */
1412 } /* end else */
1413
1414 freeret:
1415 kfree(ld_buff);
1416 h->busy_configuring = 0;
1417 /* We return -1 here to tell the ACU that we have registered/updated
1418 * all of the drives that we can and to keep it from calling us
1419 * additional times.
1420 */
1421 return -1;
1422 mem_msg:
1423 printk(KERN_ERR "cciss: out of memory\n");
1424 goto freeret;
1425 }
1426
1427 /* This function will deregister the disk and its queue from the
1428 * kernel. It must be called with the controller lock held and the
1429 * drv structure's busy_configuring flag set. Its parameters are:
1430 *
1431 * disk = This is the disk to be deregistered
1432 * drv = This is the drive_info_struct associated with the disk to be
1433 * deregistered. It contains information about the disk used
1434 * by the driver.
1435 * clear_all = This flag determines whether or not the disk information
1436 * is going to be completely cleared out and the highest_lun
1437 * reset. Sometimes we want to clear out information about
1438 * the disk in preparation for re-adding it. In this case
1439 * the highest_lun should be left unchanged and the LunID
1440 * should not be cleared.
1441 */
1442 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1443 int clear_all)
1444 {
1445 ctlr_info_t *h = get_host(disk);
1446
1447 if (!capable(CAP_SYS_RAWIO))
1448 return -EPERM;
1449
1450 /* make sure logical volume is NOT in use */
1451 if(clear_all || (h->gendisk[0] == disk)) {
1452 if (drv->usage_count > 1)
1453 return -EBUSY;
1454 }
1455 else
1456 if( drv->usage_count > 0 )
1457 return -EBUSY;
1458
1459 /* invalidate the devices and deregister the disk. If it is disk
1460 * zero do not deregister it but just zero out its values. This
1461 * allows us to delete disk zero but keep the controller registered.
1462 */
1463 if (h->gendisk[0] != disk){
1464 if (disk) {
1465 request_queue_t *q = disk->queue;
1466 if (disk->flags & GENHD_FL_UP)
1467 del_gendisk(disk);
1468 if (q) {
1469 blk_cleanup_queue(q);
1470 drv->queue = NULL;
1471 }
1472 }
1473 }
1474
1475 --h->num_luns;
1476 /* zero out the disk size info */
1477 drv->nr_blocks = 0;
1478 drv->block_size = 0;
1479 drv->heads = 0;
1480 drv->sectors = 0;
1481 drv->cylinders = 0;
1482 drv->raid_level = -1; /* This can be used as a flag variable to
1483 * indicate that this element of the drive
1484 * array is free.
1485 */
1486
1487 if (clear_all){
1488 /* check to see if it was the last disk */
1489 if (drv == h->drv + h->highest_lun) {
1490 /* if so, find the new highest lun */
1491 int i, newhighest =-1;
1492 for(i=0; i<h->highest_lun; i++) {
1493 /* if the disk has size > 0, it is available */
1494 if (h->drv[i].heads)
1495 newhighest = i;
1496 }
1497 h->highest_lun = newhighest;
1498 }
1499
1500 drv->LunID = 0;
1501 }
1502 return(0);
1503 }
1504
1505 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1506 size_t size,
1507 unsigned int use_unit_num, /* 0: address the controller,
1508 1: address logical volume log_unit,
1509 2: periph device address is scsi3addr */
1510 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1511 int cmd_type)
1512 {
1513 ctlr_info_t *h= hba[ctlr];
1514 u64bit buff_dma_handle;
1515 int status = IO_OK;
1516
1517 c->cmd_type = CMD_IOCTL_PEND;
1518 c->Header.ReplyQueue = 0;
1519 if( buff != NULL) {
1520 c->Header.SGList = 1;
1521 c->Header.SGTotal= 1;
1522 } else {
1523 c->Header.SGList = 0;
1524 c->Header.SGTotal= 0;
1525 }
1526 c->Header.Tag.lower = c->busaddr;
1527
1528 c->Request.Type.Type = cmd_type;
1529 if (cmd_type == TYPE_CMD) {
1530 switch(cmd) {
1531 case CISS_INQUIRY:
1532 /* If the logical unit number is 0, this is going
1533 to the controller, so it's a physical command:
1534 mode = 0 target = 0. So we have nothing to write.
1535 otherwise, if use_unit_num == 1,
1536 mode = 1(volume set addressing) target = LUNID
1537 otherwise, if use_unit_num == 2,
1538 mode = 0(periph dev addr) target = scsi3addr */
1539 if (use_unit_num == 1) {
1540 c->Header.LUN.LogDev.VolId=
1541 h->drv[log_unit].LunID;
1542 c->Header.LUN.LogDev.Mode = 1;
1543 } else if (use_unit_num == 2) {
1544 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1545 c->Header.LUN.LogDev.Mode = 0;
1546 }
1547 /* are we trying to read a vital product page */
1548 if(page_code != 0) {
1549 c->Request.CDB[1] = 0x01;
1550 c->Request.CDB[2] = page_code;
1551 }
1552 c->Request.CDBLen = 6;
1553 c->Request.Type.Attribute = ATTR_SIMPLE;
1554 c->Request.Type.Direction = XFER_READ;
1555 c->Request.Timeout = 0;
1556 c->Request.CDB[0] = CISS_INQUIRY;
1557 c->Request.CDB[4] = size & 0xFF;
1558 break;
1559 case CISS_REPORT_LOG:
1560 case CISS_REPORT_PHYS:
1561 /* Talking to the controller, so it's a physical command:
1562 mode = 00 target = 0. Nothing to write.
1563 */
1564 c->Request.CDBLen = 12;
1565 c->Request.Type.Attribute = ATTR_SIMPLE;
1566 c->Request.Type.Direction = XFER_READ;
1567 c->Request.Timeout = 0;
1568 c->Request.CDB[0] = cmd;
1569 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1570 c->Request.CDB[7] = (size >> 16) & 0xFF;
1571 c->Request.CDB[8] = (size >> 8) & 0xFF;
1572 c->Request.CDB[9] = size & 0xFF;
1573 break;
1574
1575 case CCISS_READ_CAPACITY:
1576 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1577 c->Header.LUN.LogDev.Mode = 1;
1578 c->Request.CDBLen = 10;
1579 c->Request.Type.Attribute = ATTR_SIMPLE;
1580 c->Request.Type.Direction = XFER_READ;
1581 c->Request.Timeout = 0;
1582 c->Request.CDB[0] = cmd;
1583 break;
1584 case CCISS_CACHE_FLUSH:
1585 c->Request.CDBLen = 12;
1586 c->Request.Type.Attribute = ATTR_SIMPLE;
1587 c->Request.Type.Direction = XFER_WRITE;
1588 c->Request.Timeout = 0;
1589 c->Request.CDB[0] = BMIC_WRITE;
1590 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1591 break;
1592 default:
1593 printk(KERN_WARNING
1594 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1595 return(IO_ERROR);
1596 }
1597 } else if (cmd_type == TYPE_MSG) {
1598 switch (cmd) {
1599 case 0: /* ABORT message */
1600 c->Request.CDBLen = 12;
1601 c->Request.Type.Attribute = ATTR_SIMPLE;
1602 c->Request.Type.Direction = XFER_WRITE;
1603 c->Request.Timeout = 0;
1604 c->Request.CDB[0] = cmd; /* abort */
1605 c->Request.CDB[1] = 0; /* abort a command */
1606 /* buff contains the tag of the command to abort */
1607 memcpy(&c->Request.CDB[4], buff, 8);
1608 break;
1609 case 1: /* RESET message */
1610 c->Request.CDBLen = 12;
1611 c->Request.Type.Attribute = ATTR_SIMPLE;
1612 c->Request.Type.Direction = XFER_WRITE;
1613 c->Request.Timeout = 0;
1614 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1615 c->Request.CDB[0] = cmd; /* reset */
1616 c->Request.CDB[1] = 0x04; /* reset a LUN */
 break; /* don't fall through into the No-Op message setup below */
1617 case 3: /* No-Op message */
1618 c->Request.CDBLen = 1;
1619 c->Request.Type.Attribute = ATTR_SIMPLE;
1620 c->Request.Type.Direction = XFER_WRITE;
1621 c->Request.Timeout = 0;
1622 c->Request.CDB[0] = cmd;
1623 break;
1624 default:
1625 printk(KERN_WARNING
1626 "cciss%d: unknown message type %d\n",
1627 ctlr, cmd);
1628 return IO_ERROR;
1629 }
1630 } else {
1631 printk(KERN_WARNING
1632 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1633 return IO_ERROR;
1634 }
1635 /* Fill in the scatter gather information */
1636 if (size > 0) {
1637 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1638 buff, size, PCI_DMA_BIDIRECTIONAL);
1639 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1640 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1641 c->SG[0].Len = size;
1642 c->SG[0].Ext = 0; /* we are not chaining */
1643 }
1644 return status;
1645 }
1646 static int sendcmd_withirq(__u8 cmd,
1647 int ctlr,
1648 void *buff,
1649 size_t size,
1650 unsigned int use_unit_num,
1651 unsigned int log_unit,
1652 __u8 page_code,
1653 int cmd_type)
1654 {
1655 ctlr_info_t *h = hba[ctlr];
1656 CommandList_struct *c;
1657 u64bit buff_dma_handle;
1658 unsigned long flags;
1659 int return_status;
1660 DECLARE_COMPLETION(wait);
1661
1662 if ((c = cmd_alloc(h , 0)) == NULL)
1663 return -ENOMEM;
1664 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1665 log_unit, page_code, NULL, cmd_type);
1666 if (return_status != IO_OK) {
1667 cmd_free(h, c, 0);
1668 return return_status;
1669 }
1670 resend_cmd2:
1671 c->waiting = &wait;
1672
1673 /* Put the request on the tail of the queue and send it */
1674 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1675 addQ(&h->reqQ, c);
1676 h->Qdepth++;
1677 start_io(h);
1678 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1679
1680 wait_for_completion(&wait);
1681
1682 if(c->err_info->CommandStatus != 0)
1683 { /* an error has occurred */
1684 switch(c->err_info->CommandStatus)
1685 {
1686 case CMD_TARGET_STATUS:
1687 printk(KERN_WARNING "cciss: cmd %p has "
1688 " completed with errors\n", c);
1689 if( c->err_info->ScsiStatus)
1690 {
1691 printk(KERN_WARNING "cciss: cmd %p "
1692 "has SCSI Status = %x\n",
1693 c,
1694 c->err_info->ScsiStatus);
1695 }
1696
1697 break;
1698 case CMD_DATA_UNDERRUN:
1699 case CMD_DATA_OVERRUN:
1700 /* expected for inquire and report lun commands */
1701 break;
1702 case CMD_INVALID:
1703 printk(KERN_WARNING "cciss: Cmd %p is "
1704 "reported invalid\n", c);
1705 return_status = IO_ERROR;
1706 break;
1707 case CMD_PROTOCOL_ERR:
1708 printk(KERN_WARNING "cciss: cmd %p has "
1709 "protocol error \n", c);
1710 return_status = IO_ERROR;
1711 break;
1712 case CMD_HARDWARE_ERR:
1713 printk(KERN_WARNING "cciss: cmd %p had "
1714 " hardware error\n", c);
1715 return_status = IO_ERROR;
1716 break;
1717 case CMD_CONNECTION_LOST:
1718 printk(KERN_WARNING "cciss: cmd %p had "
1719 "connection lost\n", c);
1720 return_status = IO_ERROR;
1721 break;
1722 case CMD_ABORTED:
1723 printk(KERN_WARNING "cciss: cmd %p was "
1724 "aborted\n", c);
1725 return_status = IO_ERROR;
1726 break;
1727 case CMD_ABORT_FAILED:
1728 printk(KERN_WARNING "cciss: cmd %p reports "
1729 "abort failed\n", c);
1730 return_status = IO_ERROR;
1731 break;
1732 case CMD_UNSOLICITED_ABORT:
1733 printk(KERN_WARNING
1734 "cciss%d: unsolicited abort %p\n",
1735 ctlr, c);
1736 if (c->retry_count < MAX_CMD_RETRIES) {
1737 printk(KERN_WARNING
1738 "cciss%d: retrying %p\n",
1739 ctlr, c);
1740 c->retry_count++;
1741 /* erase the old error information */
1742 memset(c->err_info, 0,
1743 sizeof(ErrorInfo_struct));
1744 return_status = IO_OK;
1745 INIT_COMPLETION(wait);
1746 goto resend_cmd2;
1747 }
1748 return_status = IO_ERROR;
1749 break;
1750 default:
1751 printk(KERN_WARNING "cciss: cmd %p returned "
1752 "unknown status %x\n", c,
1753 c->err_info->CommandStatus);
1754 return_status = IO_ERROR;
1755 }
1756 }
1757 /* unlock the buffers from DMA */
1758 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1759 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1760 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1761 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1762 cmd_free(h, c, 0);
1763 return(return_status);
1764
1765 }
1766 static void cciss_geometry_inquiry(int ctlr, int logvol,
1767 int withirq, unsigned int total_size,
1768 unsigned int block_size, InquiryData_struct *inq_buff,
1769 drive_info_struct *drv)
1770 {
1771 int return_code;
1772 memset(inq_buff, 0, sizeof(InquiryData_struct));
1773 if (withirq)
1774 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1775 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1776 else
1777 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1778 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1779 if (return_code == IO_OK) {
1780 if(inq_buff->data_byte[8] == 0xFF) {
1781 printk(KERN_WARNING
1782 "cciss: reading geometry failed, volume "
1783 "does not support reading geometry\n");
1784 drv->block_size = block_size;
1785 drv->nr_blocks = total_size;
1786 drv->heads = 255;
1787 drv->sectors = 32; // Sectors per track
1788 drv->cylinders = total_size / 255 / 32;
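/* i.e. fall back to a fake 255-head, 32-sectors-per-track geometry,
 * so cylinders = total blocks / (255 * 32). */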
1789 } else {
1790 unsigned int t;
1791
1792 drv->block_size = block_size;
1793 drv->nr_blocks = total_size;
1794 drv->heads = inq_buff->data_byte[6];
1795 drv->sectors = inq_buff->data_byte[7];
1796 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1797 drv->cylinders += inq_buff->data_byte[5];
1798 drv->raid_level = inq_buff->data_byte[8];
1799 t = drv->heads * drv->sectors;
1800 if (t > 1) {
1801 drv->cylinders = total_size/t;
1802 }
1803 }
1804 } else { /* Get geometry failed */
1805 printk(KERN_WARNING "cciss: reading geometry failed\n");
1806 }
1807 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1808 drv->heads, drv->sectors, drv->cylinders);
1809 }
1810 static void
1811 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1812 int withirq, unsigned int *total_size, unsigned int *block_size)
1813 {
1814 int return_code;
1815 memset(buf, 0, sizeof(*buf));
1816 if (withirq)
1817 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1818 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1819 else
1820 return_code = sendcmd(CCISS_READ_CAPACITY,
1821 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1822 if (return_code == IO_OK) {
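/* READ CAPACITY returns the address of the last block (big-endian),
 * hence the +1 to turn it into a block count. */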
1823 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1824 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1825 } else { /* read capacity command failed */
1826 printk(KERN_WARNING "cciss: read capacity failed\n");
1827 *total_size = 0;
1828 *block_size = BLOCK_SIZE;
1829 }
1830 printk(KERN_INFO " blocks= %u block_size= %d\n",
1831 *total_size, *block_size);
1832 return;
1833 }
1834
1835 static int cciss_revalidate(struct gendisk *disk)
1836 {
1837 ctlr_info_t *h = get_host(disk);
1838 drive_info_struct *drv = get_drv(disk);
1839 int logvol;
1840 int FOUND=0;
1841 unsigned int block_size;
1842 unsigned int total_size;
1843 ReadCapdata_struct *size_buff = NULL;
1844 InquiryData_struct *inq_buff = NULL;
1845
1846 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1847 {
1848 if(h->drv[logvol].LunID == drv->LunID) {
1849 FOUND=1;
1850 break;
1851 }
1852 }
1853
1854 if (!FOUND) return 1;
1855
1856 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1857 if (size_buff == NULL)
1858 {
1859 printk(KERN_WARNING "cciss: out of memory\n");
1860 return 1;
1861 }
1862 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1863 if (inq_buff == NULL)
1864 {
1865 printk(KERN_WARNING "cciss: out of memory\n");
1866 kfree(size_buff);
1867 return 1;
1868 }
1869
1870 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1871 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1872
1873 blk_queue_hardsect_size(drv->queue, drv->block_size);
1874 set_capacity(disk, drv->nr_blocks);
1875
1876 kfree(size_buff);
1877 kfree(inq_buff);
1878 return 0;
1879 }
1880
1881 /*
1882 * Wait polling for a command to complete.
1883 * The memory mapped FIFO is polled for the completion.
1884 * Used only at init time, interrupts from the HBA are disabled.
1885 */
1886 static unsigned long pollcomplete(int ctlr)
1887 {
1888 unsigned long done;
1889 int i;
1890
1891 /* Wait (up to 20 seconds) for a command to complete */
1892
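/* Poll the completion FIFO once per jiffy when it is empty, so the loop
 * below bounds the wait to roughly 20 seconds. */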
1893 for (i = 20 * HZ; i > 0; i--) {
1894 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1895 if (done == FIFO_EMPTY)
1896 schedule_timeout_uninterruptible(1);
1897 else
1898 return (done);
1899 }
1900 /* Invalid address to tell caller we ran out of time */
1901 return 1;
1902 }
1903
1904 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1905 {
1906 /* We get in here if sendcmd() is polling for completions
1907 and gets some command back that it wasn't expecting --
1908 something other than that which it just sent down.
1909 Ordinarily, that shouldn't happen, but it can happen when
1910 the scsi tape stuff gets into error handling mode, and
1911 starts using sendcmd() to try to abort commands and
1912 reset tape drives. In that case, sendcmd may pick up
1913 completions of commands that were sent to logical drives
1914 through the block i/o system, or cciss ioctls completing, etc.
1915 In that case, we need to save those completions for later
1916 processing by the interrupt handler.
1917 */
1918
1919 #ifdef CONFIG_CISS_SCSI_TAPE
1920 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1921
1922 /* If it's not the scsi tape stuff doing error handling, (abort */
1923 /* or reset) then we don't expect anything weird. */
1924 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1925 #endif
1926 printk( KERN_WARNING "cciss cciss%d: SendCmd "
1927 "Invalid command list address returned! (%lx)\n",
1928 ctlr, complete);
1929 /* not much we can do. */
1930 #ifdef CONFIG_CISS_SCSI_TAPE
1931 return 1;
1932 }
1933
1934 /* We've sent down an abort or reset, but something else
1935 has completed */
1936 if (srl->ncompletions >= (NR_CMDS + 2)) {
1937 /* Uh oh. No room to save it for later... */
1938 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1939 "reject list overflow, command lost!\n", ctlr);
1940 return 1;
1941 }
1942 /* Save it for later */
1943 srl->complete[srl->ncompletions] = complete;
1944 srl->ncompletions++;
1945 #endif
1946 return 0;
1947 }
1948
1949 /*
1950 * Send a command to the controller, and wait for it to complete.
1951 * Only used at init time.
1952 */
1953 static int sendcmd(
1954 __u8 cmd,
1955 int ctlr,
1956 void *buff,
1957 size_t size,
1958 unsigned int use_unit_num, /* 0: address the controller,
1959 1: address logical volume log_unit,
1960 2: periph device address is scsi3addr */
1961 unsigned int log_unit,
1962 __u8 page_code,
1963 unsigned char *scsi3addr,
1964 int cmd_type)
1965 {
1966 CommandList_struct *c;
1967 int i;
1968 unsigned long complete;
1969 ctlr_info_t *info_p= hba[ctlr];
1970 u64bit buff_dma_handle;
1971 int status, done = 0;
1972
1973 if ((c = cmd_alloc(info_p, 1)) == NULL) {
1974 printk(KERN_WARNING "cciss: unable to get memory");
1975 return(IO_ERROR);
1976 }
1977 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1978 log_unit, page_code, scsi3addr, cmd_type);
1979 if (status != IO_OK) {
1980 cmd_free(info_p, c, 1);
1981 return status;
1982 }
1983 resend_cmd1:
1984 /*
1985 * Disable interrupt
1986 */
1987 #ifdef CCISS_DEBUG
1988 printk(KERN_DEBUG "cciss: turning intr off\n");
1989 #endif /* CCISS_DEBUG */
1990 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1991
1992 /* Make sure there is room in the command FIFO */
1993 /* Actually it should be completely empty at this time */
1994 /* unless we are in here doing error handling for the scsi */
1995 /* tape side of the driver. */
1996 for (i = 200000; i > 0; i--)
1997 {
1998 /* if fifo isn't full go */
1999 if (!(info_p->access.fifo_full(info_p)))
2000 {
2001
2002 break;
2003 }
2004 udelay(10);
2005 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2006 " waiting!\n", ctlr);
2007 }
2008 /*
2009 * Send the cmd
2010 */
2011 info_p->access.submit_command(info_p, c);
2012 done = 0;
2013 do {
2014 complete = pollcomplete(ctlr);
2015
2016 #ifdef CCISS_DEBUG
2017 printk(KERN_DEBUG "cciss: command completed\n");
2018 #endif /* CCISS_DEBUG */
2019
2020 if (complete == 1) {
2021 printk( KERN_WARNING
2022 "cciss cciss%d: SendCmd Timeout out, "
2023 "No command list address returned!\n",
2024 ctlr);
2025 status = IO_ERROR;
2026 done = 1;
2027 break;
2028 }
2029
2030 /* This will need to change for direct lookup completions */
2031 if ( (complete & CISS_ERROR_BIT)
2032 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2033 {
2034 /* if data overrun or underrun on a Report or Inquiry command,
2035 ignore it
2036 */
2037 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2038 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2039 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2040 ((c->err_info->CommandStatus ==
2041 CMD_DATA_OVERRUN) ||
2042 (c->err_info->CommandStatus ==
2043 CMD_DATA_UNDERRUN)
2044 ))
2045 {
2046 complete = c->busaddr;
2047 } else {
2048 if (c->err_info->CommandStatus ==
2049 CMD_UNSOLICITED_ABORT) {
2050 printk(KERN_WARNING "cciss%d: "
2051 "unsolicited abort %p\n",
2052 ctlr, c);
2053 if (c->retry_count < MAX_CMD_RETRIES) {
2054 printk(KERN_WARNING
2055 "cciss%d: retrying %p\n",
2056 ctlr, c);
2057 c->retry_count++;
2058 /* erase the old error */
2059 /* information */
2060 memset(c->err_info, 0,
2061 sizeof(ErrorInfo_struct));
2062 goto resend_cmd1;
2063 } else {
2064 printk(KERN_WARNING
2065 "cciss%d: retried %p too "
2066 "many times\n", ctlr, c);
2067 status = IO_ERROR;
2068 goto cleanup1;
2069 }
2070 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2071 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2072 status = IO_ERROR;
2073 goto cleanup1;
2074 }
2075 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2076 " Error %x \n", ctlr,
2077 c->err_info->CommandStatus);
2078 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2079 " offensive info\n"
2080 " size %x\n num %x value %x\n", ctlr,
2081 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2082 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2083 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2084 status = IO_ERROR;
2085 goto cleanup1;
2086 }
2087 }
2088 /* This will need changing for direct lookup completions */
2089 if (complete != c->busaddr) {
2090 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2091 BUG(); /* we are pretty much hosed if we get here. */
2092 }
2093 continue;
2094 } else
2095 done = 1;
2096 } while (!done);
2097
2098 cleanup1:
2099 /* unlock the data buffer from DMA */
2100 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2101 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2102 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2103 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2104 #ifdef CONFIG_CISS_SCSI_TAPE
2105 /* if we saved some commands for later, process them now. */
2106 if (info_p->scsi_rejects.ncompletions > 0)
2107 do_cciss_intr(0, info_p, NULL);
2108 #endif
2109 cmd_free(info_p, c, 1);
2110 return (status);
2111 }
2112 /*
2113 * Map (physical) PCI mem into (virtual) kernel space
2114 */
2115 static void __iomem *remap_pci_mem(ulong base, ulong size)
2116 {
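/* The mapping is done from the start of the page containing "base" and the
 * returned pointer is adjusted by the offset into that page. */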
2117 ulong page_base = ((ulong) base) & PAGE_MASK;
2118 ulong page_offs = ((ulong) base) - page_base;
2119 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2120
2121 return page_remapped ? (page_remapped + page_offs) : NULL;
2122 }
2123
2124 /*
2125 * Takes jobs off the request queue and sends them to the hardware, then
2126 * puts them on the completion queue to wait for completion.
2127 */
2128 static void start_io( ctlr_info_t *h)
2129 {
2130 CommandList_struct *c;
2131
2132 while(( c = h->reqQ) != NULL )
2133 {
2134 /* can't do anything if fifo is full */
2135 if ((h->access.fifo_full(h))) {
2136 printk(KERN_WARNING "cciss: fifo full\n");
2137 break;
2138 }
2139
2140 /* Get the first entry from the Request Q */
2141 removeQ(&(h->reqQ), c);
2142 h->Qdepth--;
2143
2144 /* Tell the controller to execute the command */
2145 h->access.submit_command(h, c);
2146
2147 /* Put job onto the completed Q */
2148 addQ (&(h->cmpQ), c);
2149 }
2150 }
2151
2152 static inline void complete_buffers(struct bio *bio, int status)
2153 {
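/* Walk the chain of bios attached to this request, completing each one
 * for its full byte count with the given status. */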
2154 while (bio) {
2155 struct bio *xbh = bio->bi_next;
2156 int nr_sectors = bio_sectors(bio);
2157
2158 bio->bi_next = NULL;
2159 blk_finished_io(nr_sectors);
2160 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
2161 bio = xbh;
2162 }
2163
2164 }
2165 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2166 /* Zeros out the error record and then resends the command back */
2167 /* to the controller */
2168 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2169 {
2170 /* erase the old error information */
2171 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2172
2173 /* add it to software queue and then send it to the controller */
2174 addQ(&(h->reqQ),c);
2175 h->Qdepth++;
2176 if(h->Qdepth > h->maxQsinceinit)
2177 h->maxQsinceinit = h->Qdepth;
2178
2179 start_io(h);
2180 }
2181 /* checks the status of the job and calls complete buffers to mark all
2182 * buffers for the completed job.
2183 */
2184 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2185 int timeout)
2186 {
2187 int status = 1;
2188 int i;
2189 int retry_cmd = 0;
2190 u64bit temp64;
2191
2192 if (timeout)
2193 status = 0;
2194
2195 if(cmd->err_info->CommandStatus != 0)
2196 { /* an error has occurred */
2197 switch(cmd->err_info->CommandStatus)
2198 {
2199 unsigned char sense_key;
2200 case CMD_TARGET_STATUS:
2201 status = 0;
2202
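/* A ScsiStatus of 0x02 is CHECK CONDITION; sense keys 0x0 (no sense)
 * and 0x1 (recovered error) are still treated as success below. */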
2203 if( cmd->err_info->ScsiStatus == 0x02)
2204 {
2205 printk(KERN_WARNING "cciss: cmd %p "
2206 "has CHECK CONDITION "
2207 " byte 2 = 0x%x\n", cmd,
2208 cmd->err_info->SenseInfo[2]
2209 );
2210 /* check the sense key */
2211 sense_key = 0xf &
2212 cmd->err_info->SenseInfo[2];
2213 /* no status or recovered error */
2214 if((sense_key == 0x0) ||
2215 (sense_key == 0x1))
2216 {
2217 status = 1;
2218 }
2219 } else
2220 {
2221 printk(KERN_WARNING "cciss: cmd %p "
2222 "has SCSI Status 0x%x\n",
2223 cmd, cmd->err_info->ScsiStatus);
2224 }
2225 break;
2226 case CMD_DATA_UNDERRUN:
2227 printk(KERN_WARNING "cciss: cmd %p has"
2228 " completed with data underrun "
2229 "reported\n", cmd);
2230 break;
2231 case CMD_DATA_OVERRUN:
2232 printk(KERN_WARNING "cciss: cmd %p has"
2233 " completed with data overrun "
2234 "reported\n", cmd);
2235 break;
2236 case CMD_INVALID:
2237 printk(KERN_WARNING "cciss: cmd %p is "
2238 "reported invalid\n", cmd);
2239 status = 0;
2240 break;
2241 case CMD_PROTOCOL_ERR:
2242 printk(KERN_WARNING "cciss: cmd %p has "
2243 "protocol error \n", cmd);
2244 status = 0;
2245 break;
2246 case CMD_HARDWARE_ERR:
2247 printk(KERN_WARNING "cciss: cmd %p had "
2248 " hardware error\n", cmd);
2249 status = 0;
2250 break;
2251 case CMD_CONNECTION_LOST:
2252 printk(KERN_WARNING "cciss: cmd %p had "
2253 "connection lost\n", cmd);
2254 status=0;
2255 break;
2256 case CMD_ABORTED:
2257 printk(KERN_WARNING "cciss: cmd %p was "
2258 "aborted\n", cmd);
2259 status=0;
2260 break;
2261 case CMD_ABORT_FAILED:
2262 printk(KERN_WARNING "cciss: cmd %p reports "
2263 "abort failed\n", cmd);
2264 status=0;
2265 break;
2266 case CMD_UNSOLICITED_ABORT:
2267 printk(KERN_WARNING "cciss%d: unsolicited "
2268 "abort %p\n", h->ctlr, cmd);
2269 if (cmd->retry_count < MAX_CMD_RETRIES) {
2270 retry_cmd=1;
2271 printk(KERN_WARNING
2272 "cciss%d: retrying %p\n",
2273 h->ctlr, cmd);
2274 cmd->retry_count++;
2275 } else
2276 printk(KERN_WARNING
2277 "cciss%d: %p retried too "
2278 "many times\n", h->ctlr, cmd);
2279 status=0;
2280 break;
2281 case CMD_TIMEOUT:
2282 printk(KERN_WARNING "cciss: cmd %p timedout\n",
2283 cmd);
2284 status=0;
2285 break;
2286 default:
2287 printk(KERN_WARNING "cciss: cmd %p returned "
2288 "unknown status %x\n", cmd,
2289 cmd->err_info->CommandStatus);
2290 status=0;
2291 }
2292 }
2293 /* We need to return this command */
2294 if(retry_cmd) {
2295 resend_cciss_cmd(h,cmd);
2296 return;
2297 }
2298 /* command did not need to be retried */
2299 /* unmap the DMA mapping for all the scatter gather elements */
2300 for(i=0; i<cmd->Header.SGList; i++) {
2301 temp64.val32.lower = cmd->SG[i].Addr.lower;
2302 temp64.val32.upper = cmd->SG[i].Addr.upper;
2303 pci_unmap_page(hba[cmd->ctlr]->pdev,
2304 temp64.val, cmd->SG[i].Len,
2305 (cmd->Request.Type.Direction == XFER_READ) ?
2306 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
2307 }
2308 complete_buffers(cmd->rq->bio, status);
2309
2310 #ifdef CCISS_DEBUG
2311 printk("Done with %p\n", cmd->rq);
2312 #endif /* CCISS_DEBUG */
2313
2314 end_that_request_last(cmd->rq, status ? 1 : -EIO);
2315 cmd_free(h,cmd,1);
2316 }
2317
2318 /*
2319 * Get a request and submit it to the controller.
2320 */
2321 static void do_cciss_request(request_queue_t *q)
2322 {
2323 ctlr_info_t *h= q->queuedata;
2324 CommandList_struct *c;
2325 int start_blk, seg;
2326 struct request *creq;
2327 u64bit temp64;
2328 struct scatterlist tmp_sg[MAXSGENTRIES];
2329 drive_info_struct *drv;
2330 int i, dir;
2331
2332 /* We call start_io here in case there is a command waiting on the
2333 * queue that has not been sent.
2334 */
2335 if (blk_queue_plugged(q))
2336 goto startio;
2337
2338 queue:
2339 creq = elv_next_request(q);
2340 if (!creq)
2341 goto startio;
2342
2343 if (creq->nr_phys_segments > MAXSGENTRIES)
2344 BUG();
2345
2346 if (( c = cmd_alloc(h, 1)) == NULL)
2347 goto full;
2348
2349 blkdev_dequeue_request(creq);
2350
2351 spin_unlock_irq(q->queue_lock);
2352
2353 c->cmd_type = CMD_RWREQ;
2354 c->rq = creq;
2355
2356 /* fill in the request */
2357 drv = creq->rq_disk->private_data;
2358 c->Header.ReplyQueue = 0; // unused in simple mode
2359 /* got command from pool, so use the command block index instead */
2360 /* for direct lookups. */
2361 /* The first 2 bits are reserved for controller error reporting. */
2362 c->Header.Tag.lower = (c->cmdindex << 3);
2363 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
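/* The resulting tag (command index << 3 with bit 2 set) lets the
 * interrupt handler recover the command pool index straight from the
 * completion FIFO entry instead of searching the completion queue. */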
2364 c->Header.LUN.LogDev.VolId= drv->LunID;
2365 c->Header.LUN.LogDev.Mode = 1;
2366 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2367 c->Request.Type.Type = TYPE_CMD; // It is a command.
2368 c->Request.Type.Attribute = ATTR_SIMPLE;
2369 c->Request.Type.Direction =
2370 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2371 c->Request.Timeout = 0; // Don't time out
2372 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2373 start_blk = creq->sector;
2374 #ifdef CCISS_DEBUG
2375 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
2376 (int) creq->nr_sectors);
2377 #endif /* CCISS_DEBUG */
2378
2379 seg = blk_rq_map_sg(q, creq, tmp_sg);
2380
2381 /* get the DMA records for the setup */
2382 if (c->Request.Type.Direction == XFER_READ)
2383 dir = PCI_DMA_FROMDEVICE;
2384 else
2385 dir = PCI_DMA_TODEVICE;
2386
2387 for (i=0; i<seg; i++)
2388 {
2389 c->SG[i].Len = tmp_sg[i].length;
2390 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2391 tmp_sg[i].offset, tmp_sg[i].length,
2392 dir);
2393 c->SG[i].Addr.lower = temp64.val32.lower;
2394 c->SG[i].Addr.upper = temp64.val32.upper;
2395 c->SG[i].Ext = 0; // we are not chaining
2396 }
2397 /* track how many SG entries we are using */
2398 if( seg > h->maxSG)
2399 h->maxSG = seg;
2400
2401 #ifdef CCISS_DEBUG
2402 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2403 #endif /* CCISS_DEBUG */
2404
2405 c->Header.SGList = c->Header.SGTotal = seg;
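/* Fill in the rest of the 10-byte CDB: bytes 2-5 hold the big-endian
 * starting block, bytes 7-8 the big-endian sector count. */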
2406 c->Request.CDB[1]= 0;
2407 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2408 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2409 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2410 c->Request.CDB[5]= start_blk & 0xff;
2411 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2412 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2413 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2414 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2415
2416 spin_lock_irq(q->queue_lock);
2417
2418 addQ(&(h->reqQ),c);
2419 h->Qdepth++;
2420 if(h->Qdepth > h->maxQsinceinit)
2421 h->maxQsinceinit = h->Qdepth;
2422
2423 goto queue;
2424 full:
2425 blk_stop_queue(q);
2426 startio:
2427 /* We will already have the driver lock here, so we do not
2428 * need to take it again.
2429 */
2430 start_io(h);
2431 }
2432
2433 static inline unsigned long get_next_completion(ctlr_info_t *h)
2434 {
2435 #ifdef CONFIG_CISS_SCSI_TAPE
2436 /* Any rejects from sendcmd() lying around? Process them first */
2437 if (h->scsi_rejects.ncompletions == 0)
2438 return h->access.command_completed(h);
2439 else {
2440 struct sendcmd_reject_list *srl;
2441 int n;
2442 srl = &h->scsi_rejects;
2443 n = --srl->ncompletions;
2444 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2445 printk("p");
2446 return srl->complete[n];
2447 }
2448 #else
2449 return h->access.command_completed(h);
2450 #endif
2451 }
2452
2453 static inline int interrupt_pending(ctlr_info_t *h)
2454 {
2455 #ifdef CONFIG_CISS_SCSI_TAPE
2456 return ( h->access.intr_pending(h)
2457 || (h->scsi_rejects.ncompletions > 0));
2458 #else
2459 return h->access.intr_pending(h);
2460 #endif
2461 }
2462
2463 static inline long interrupt_not_for_us(ctlr_info_t *h)
2464 {
2465 #ifdef CONFIG_CISS_SCSI_TAPE
2466 return (((h->access.intr_pending(h) == 0) ||
2467 (h->interrupts_enabled == 0))
2468 && (h->scsi_rejects.ncompletions == 0));
2469 #else
2470 return (((h->access.intr_pending(h) == 0) ||
2471 (h->interrupts_enabled == 0)));
2472 #endif
2473 }
2474
2475 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2476 {
2477 ctlr_info_t *h = dev_id;
2478 CommandList_struct *c;
2479 unsigned long flags;
2480 __u32 a, a1, a2;
2481 int j;
2482 int start_queue = h->next_to_run;
2483
2484 if (interrupt_not_for_us(h))
2485 return IRQ_NONE;
2486 /*
2487 * If there are completed commands in the completion queue,
2488 * we had better do something about it.
2489 */
2490 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2491 while (interrupt_pending(h)) {
2492 while((a = get_next_completion(h)) != FIFO_EMPTY) {
2493 a1 = a;
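/* Bit 2 of the tag marks a direct-lookup completion: bits 31:3 are the
 * command pool index. Otherwise the tag is the command's bus address
 * (low status bits masked off) and the completion queue is searched. */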
2494 if ((a & 0x04)) {
2495 a2 = (a >> 3);
2496 if (a2 >= NR_CMDS) {
2497 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2498 fail_all_cmds(h->ctlr);
2499 return IRQ_HANDLED;
2500 }
2501
2502 c = h->cmd_pool + a2;
2503 a = c->busaddr;
2504
2505 } else {
2506 a &= ~3;
2507 if ((c = h->cmpQ) == NULL) {
2508 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2509 continue;
2510 }
2511 while(c->busaddr != a) {
2512 c = c->next;
2513 if (c == h->cmpQ)
2514 break;
2515 }
2516 }
2517 /*
2518 * If we've found the command, take it off the
2519 * completion Q and free it
2520 */
2521 if (c->busaddr == a) {
2522 removeQ(&h->cmpQ, c);
2523 if (c->cmd_type == CMD_RWREQ) {
2524 complete_command(h, c, 0);
2525 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2526 complete(c->waiting);
2527 }
2528 # ifdef CONFIG_CISS_SCSI_TAPE
2529 else if (c->cmd_type == CMD_SCSI)
2530 complete_scsi_command(c, 0, a1);
2531 # endif
2532 continue;
2533 }
2534 }
2535 }
2536
2537 /* check to see if we have maxed out the number of commands that can
2538 * be placed on the queue. If so then exit. We do this check here
2539 * in case the interrupt we serviced was from an ioctl and did not
2540 * free any new commands.
2541 */
2542 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2543 goto cleanup;
2544
2545 /* We have room on the queue for more commands. Now we need to queue
2546 * them up. We will also keep track of the next queue to run so
2547 * that every queue gets a chance to be started first.
2548 */
2549 for (j=0; j < h->highest_lun + 1; j++){
2550 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2551 /* make sure the disk has been added and the drive is real
2552 * because this can be called from the middle of init_one.
2553 */
2554 if(!(h->drv[curr_queue].queue) ||
2555 !(h->drv[curr_queue].heads))
2556 continue;
2557 blk_start_queue(h->gendisk[curr_queue]->queue);
2558
2559 /* check to see if we have maxed out the number of commands
2560 * that can be placed on the queue.
2561 */
2562 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2563 {
2564 if (curr_queue == start_queue){
2565 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2566 goto cleanup;
2567 } else {
2568 h->next_to_run = curr_queue;
2569 goto cleanup;
2570 }
2571 } else {
2572 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2573 }
2574 }
2575
2576 cleanup:
2577 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2578 return IRQ_HANDLED;
2579 }
2580 /*
2581 * We cannot read the structure directly; for portability we must use
2582 * the I/O accessor functions.
2583 * This is for debug only.
2584 */
2585 #ifdef CCISS_DEBUG
2586 static void print_cfg_table( CfgTable_struct *tb)
2587 {
2588 int i;
2589 char temp_name[17];
2590
2591 printk("Controller Configuration information\n");
2592 printk("------------------------------------\n");
2593 for(i=0;i<4;i++)
2594 temp_name[i] = readb(&(tb->Signature[i]));
2595 temp_name[4]='\0';
2596 printk(" Signature = %s\n", temp_name);
2597 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2598 printk(" Transport methods supported = 0x%x\n",
2599 readl(&(tb-> TransportSupport)));
2600 printk(" Transport methods active = 0x%x\n",
2601 readl(&(tb->TransportActive)));
2602 printk(" Requested transport Method = 0x%x\n",
2603 readl(&(tb->HostWrite.TransportRequest)));
2604 printk(" Coalesce Interrupt Delay = 0x%x\n",
2605 readl(&(tb->HostWrite.CoalIntDelay)));
2606 printk(" Coalesce Interrupt Count = 0x%x\n",
2607 readl(&(tb->HostWrite.CoalIntCount)));
2608 printk(" Max outstanding commands = %d\n",
2609 readl(&(tb->CmdsOutMax)));
2610 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2611 for(i=0;i<16;i++)
2612 temp_name[i] = readb(&(tb->ServerName[i]));
2613 temp_name[16] = '\0';
2614 printk(" Server Name = %s\n", temp_name);
2615 printk(" Heartbeat Counter = 0x%x\n\n\n",
2616 readl(&(tb->HeartBeat)));
2617 }
2618 #endif /* CCISS_DEBUG */
2619
2620 static void release_io_mem(ctlr_info_t *c)
2621 {
2622 /* if IO mem was not protected do nothing */
2623 if( c->io_mem_addr == 0)
2624 return;
2625 release_region(c->io_mem_addr, c->io_mem_length);
2626 c->io_mem_addr = 0;
2627 c->io_mem_length = 0;
2628 }
2629
2630 static int find_PCI_BAR_index(struct pci_dev *pdev,
2631 unsigned long pci_bar_addr)
2632 {
2633 int i, offset, mem_type, bar_type;
2634 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2635 return 0;
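/* Walk the BARs, accumulating their size in config space (4 bytes for
 * I/O and 32-bit memory BARs, 8 bytes for 64-bit memory BARs) until the
 * accumulated offset matches the one requested. */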
2636 offset = 0;
2637 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2638 bar_type = pci_resource_flags(pdev, i) &
2639 PCI_BASE_ADDRESS_SPACE;
2640 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2641 offset += 4;
2642 else {
2643 mem_type = pci_resource_flags(pdev, i) &
2644 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2645 switch (mem_type) {
2646 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2647 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2648 offset += 4; /* 32 bit */
2649 break;
2650 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2651 offset += 8;
2652 break;
2653 default: /* reserved in PCI 2.2 */
2654 printk(KERN_WARNING "Base address is invalid\n");
2655 return -1;
2656 break;
2657 }
2658 }
2659 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2660 return i+1;
2661 }
2662 return -1;
2663 }
2664
2665 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2666 {
2667 ushort subsystem_vendor_id, subsystem_device_id, command;
2668 __u32 board_id, scratchpad = 0;
2669 __u64 cfg_offset;
2670 __u32 cfg_base_addr;
2671 __u64 cfg_base_addr_index;
2672 int i;
2673
2674 /* check to see if controller has been disabled */
2675 /* BEFORE trying to enable it */
2676 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2677 if(!(command & 0x02))
2678 {
2679 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2680 return(-1);
2681 }
2682
2683 if (pci_enable_device(pdev))
2684 {
2685 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2686 return( -1);
2687 }
2688
2689 subsystem_vendor_id = pdev->subsystem_vendor;
2690 subsystem_device_id = pdev->subsystem_device;
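/* board_id packs the PCI subsystem device ID into the upper 16 bits and
 * the subsystem vendor ID into the lower 16 bits, matching the entries
 * in the products[] table. */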
2691 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2692 subsystem_vendor_id);
2693
2694 /* search for our IO range so we can protect it */
2695 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2696 {
2697 /* is this an IO range */
2698 if( pci_resource_flags(pdev, i) & 0x01 ) {
2699 c->io_mem_addr = pci_resource_start(pdev, i);
2700 c->io_mem_length = pci_resource_end(pdev, i) -
2701 pci_resource_start(pdev, i) +1;
2702 #ifdef CCISS_DEBUG
2703 printk("IO value found base_addr[%d] %lx %lx\n", i,
2704 c->io_mem_addr, c->io_mem_length);
2705 #endif /* CCISS_DEBUG */
2706 /* register the IO range */
2707 if(!request_region( c->io_mem_addr,
2708 c->io_mem_length, "cciss"))
2709 {
2710 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2711 c->io_mem_addr, c->io_mem_length);
2712 c->io_mem_addr= 0;
2713 c->io_mem_length = 0;
2714 }
2715 break;
2716 }
2717 }
2718
2719 #ifdef CCISS_DEBUG
2720 printk("command = %x\n", command);
2721 printk("irq = %x\n", pdev->irq);
2722 printk("board_id = %x\n", board_id);
2723 #endif /* CCISS_DEBUG */
2724
2725 c->intr = pdev->irq;
2726
2727 /*
2728 * The memory base address is the first address; the second points to
2729 * the config table.
2730 */
2731
2732 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2733 #ifdef CCISS_DEBUG
2734 printk("address 0 = %x\n", c->paddr);
2735 #endif /* CCISS_DEBUG */
2736 c->vaddr = remap_pci_mem(c->paddr, 200);
2737
2738 /* Wait for the board to become ready. (PCI hotplug needs this.)
2739 * We poll for up to 120 secs, once per 100ms. */
2740 for (i=0; i < 1200; i++) {
2741 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2742 if (scratchpad == CCISS_FIRMWARE_READY)
2743 break;
2744 set_current_state(TASK_INTERRUPTIBLE);
2745 schedule_timeout(HZ / 10); /* wait 100ms */
2746 }
2747 if (scratchpad != CCISS_FIRMWARE_READY) {
2748 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2749 return -1;
2750 }
2751
2752 /* get the address index number */
2753 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2754 cfg_base_addr &= (__u32) 0x0000ffff;
2755 #ifdef CCISS_DEBUG
2756 printk("cfg base address = %x\n", cfg_base_addr);
2757 #endif /* CCISS_DEBUG */
2758 cfg_base_addr_index =
2759 find_PCI_BAR_index(pdev, cfg_base_addr);
2760 #ifdef CCISS_DEBUG
2761 printk("cfg base address index = %x\n", cfg_base_addr_index);
2762 #endif /* CCISS_DEBUG */
2763 if (cfg_base_addr_index == -1) {
2764 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2765 release_io_mem(c);
2766 return -1;
2767 }
2768
2769 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2770 #ifdef CCISS_DEBUG
2771 printk("cfg offset = %x\n", cfg_offset);
2772 #endif /* CCISS_DEBUG */
2773 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2774 cfg_base_addr_index) + cfg_offset,
2775 sizeof(CfgTable_struct));
2776 c->board_id = board_id;
2777
2778 #ifdef CCISS_DEBUG
2779 print_cfg_table(c->cfgtable);
2780 #endif /* CCISS_DEBUG */
2781
2782 for(i=0; i<NR_PRODUCTS; i++) {
2783 if (board_id == products[i].board_id) {
2784 c->product_name = products[i].product_name;
2785 c->access = *(products[i].access);
2786 break;
2787 }
2788 }
2789 if (i == NR_PRODUCTS) {
2790 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2791 " to access the Smart Array controller %08lx\n",
2792 (unsigned long)board_id);
2793 return -1;
2794 }
2795 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2796 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2797 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2798 (readb(&c->cfgtable->Signature[3]) != 'S') )
2799 {
2800 printk("Does not appear to be a valid CISS config table\n");
2801 return -1;
2802 }
2803
2804 #ifdef CONFIG_X86
2805 {
2806 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2807 __u32 prefetch;
2808 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2809 prefetch |= 0x100;
2810 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2811 }
2812 #endif
2813
2814 #ifdef CCISS_DEBUG
2815 printk("Trying to put board into Simple mode\n");
2816 #endif /* CCISS_DEBUG */
2817 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2818 /* Update the field, and then ring the doorbell */
2819 writel( CFGTBL_Trans_Simple,
2820 &(c->cfgtable->HostWrite.TransportRequest));
2821 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2822
2823 /* under certain very rare conditions, this can take a while.
2824 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2825 * as we enter this code.) */
2826 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2827 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2828 break;
2829 /* delay and try again */
2830 set_current_state(TASK_INTERRUPTIBLE);
2831 schedule_timeout(10);
2832 }
2833
2834 #ifdef CCISS_DEBUG
2835 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2836 #endif /* CCISS_DEBUG */
2837 #ifdef CCISS_DEBUG
2838 print_cfg_table(c->cfgtable);
2839 #endif /* CCISS_DEBUG */
2840
2841 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2842 {
2843 printk(KERN_WARNING "cciss: unable to get board into"
2844 " simple mode\n");
2845 return -1;
2846 }
2847 return 0;
2848
2849 }
2850
2851 /*
2852 * Gets information about the local volumes attached to the controller.
2853 */
2854 static void cciss_getgeometry(int cntl_num)
2855 {
2856 ReportLunData_struct *ld_buff;
2857 ReadCapdata_struct *size_buff;
2858 InquiryData_struct *inq_buff;
2859 int return_code;
2860 int i;
2861 int listlength = 0;
2862 __u32 lunid = 0;
2863 int block_size;
2864 int total_size;
2865
2866 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2867 if (ld_buff == NULL)
2868 {
2869 printk(KERN_ERR "cciss: out of memory\n");
2870 return;
2871 }
2872 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2873 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2874 if (size_buff == NULL)
2875 {
2876 printk(KERN_ERR "cciss: out of memory\n");
2877 kfree(ld_buff);
2878 return;
2879 }
2880 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2881 if (inq_buff == NULL)
2882 {
2883 printk(KERN_ERR "cciss: out of memory\n");
2884 kfree(ld_buff);
2885 kfree(size_buff);
2886 return;
2887 }
2888 /* Get the firmware version */
2889 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2890 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2891 if (return_code == IO_OK)
2892 {
2893 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2894 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2895 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2896 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2897 } else /* send command failed */
2898 {
2899 printk(KERN_WARNING "cciss: unable to determine firmware"
2900 " version of controller\n");
2901 }
2902 /* Get the number of logical volumes */
2903 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2904 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2905
2906 if( return_code == IO_OK)
2907 {
2908 #ifdef CCISS_DEBUG
2909 printk("LUN Data\n--------------------------\n");
2910 #endif /* CCISS_DEBUG */
2911
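/* LUNListLength comes back as a 4-byte big-endian byte count; assemble
 * it by hand. */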
2912 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2913 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2914 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2915 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2916 } else /* reading number of logical volumes failed */
2917 {
2918 printk(KERN_WARNING "cciss: report logical volume"
2919 " command failed\n");
2920 listlength = 0;
2921 }
2922 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
2923 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
2924 {
2925 printk(KERN_ERR "ciss: only %d number of logical volumes supported\n",
2926 CISS_MAX_LUN);
2927 hba[cntl_num]->num_luns = CISS_MAX_LUN;
2928 }
2929 #ifdef CCISS_DEBUG
2930 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
2931 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
2932 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
2933 #endif /* CCISS_DEBUG */
2934
2935 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
2936 // for(i=0; i< hba[cntl_num]->num_luns; i++)
2937 for(i=0; i < CISS_MAX_LUN; i++)
2938 {
2939 if (i < hba[cntl_num]->num_luns){
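/* The first four bytes of each 8-byte LUN entry hold the logical volume
 * ID; byte 0 is the least significant byte. */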
2940 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
2941 << 24;
2942 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
2943 << 16;
2944 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
2945 << 8;
2946 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
2947
2948 hba[cntl_num]->drv[i].LunID = lunid;
2949
2950
2951 #ifdef CCISS_DEBUG
2952 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
2953 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
2954 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
2955 hba[cntl_num]->drv[i].LunID);
2956 #endif /* CCISS_DEBUG */
2957 cciss_read_capacity(cntl_num, i, size_buff, 0,
2958 &total_size, &block_size);
2959 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
2960 block_size, inq_buff, &hba[cntl_num]->drv[i]);
2961 } else {
2962 /* initialize raid_level to indicate a free space */
2963 hba[cntl_num]->drv[i].raid_level = -1;
2964 }
2965 }
2966 kfree(ld_buff);
2967 kfree(size_buff);
2968 kfree(inq_buff);
2969 }
2970
2971 /* Function to find the first free pointer into our hba[] array */
2972 /* Returns -1 if no free entries are left. */
2973 static int alloc_cciss_hba(void)
2974 {
2975 struct gendisk *disk[NWD];
2976 int i, n;
2977 for (n = 0; n < NWD; n++) {
2978 disk[n] = alloc_disk(1 << NWD_SHIFT);
2979 if (!disk[n])
2980 goto out;
2981 }
2982
2983 for(i=0; i< MAX_CTLR; i++) {
2984 if (!hba[i]) {
2985 ctlr_info_t *p;
2986 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
2987 if (!p)
2988 goto Enomem;
2989 memset(p, 0, sizeof(ctlr_info_t));
2990 for (n = 0; n < NWD; n++)
2991 p->gendisk[n] = disk[n];
2992 hba[i] = p;
2993 return i;
2994 }
2995 }
2996 printk(KERN_WARNING "cciss: This driver supports a maximum"
2997 " of %d controllers.\n", MAX_CTLR);
2998 goto out;
2999 Enomem:
3000 printk(KERN_ERR "cciss: out of memory.\n");
3001 out:
3002 while (n--)
3003 put_disk(disk[n]);
3004 return -1;
3005 }
3006
3007 static void free_hba(int i)
3008 {
3009 ctlr_info_t *p = hba[i];
3010 int n;
3011
3012 hba[i] = NULL;
3013 for (n = 0; n < NWD; n++)
3014 put_disk(p->gendisk[n]);
3015 kfree(p);
3016 }
3017
3018 /*
3019 * This is it. Find all the controllers and register them. I really hate
3020 * stealing all these major device numbers.
3021 * returns the number of block devices registered.
3022 */
3023 static int __devinit cciss_init_one(struct pci_dev *pdev,
3024 const struct pci_device_id *ent)
3025 {
3026 request_queue_t *q;
3027 int i;
3028 int j;
3029 int rc;
3030
3031 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3032 " bus %d dev %d func %d\n",
3033 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3034 PCI_FUNC(pdev->devfn));
3035 i = alloc_cciss_hba();
3036 if(i < 0)
3037 return (-1);
3038
3039 hba[i]->busy_initializing = 1;
3040
3041 if (cciss_pci_init(hba[i], pdev) != 0)
3042 goto clean1;
3043
3044 sprintf(hba[i]->devname, "cciss%d", i);
3045 hba[i]->ctlr = i;
3046 hba[i]->pdev = pdev;
3047
3048 /* configure PCI DMA stuff */
3049 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3050 printk("cciss: using DAC cycles\n");
3051 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3052 printk("cciss: not using DAC cycles\n");
3053 else {
3054 printk("cciss: no suitable DMA available\n");
3055 goto clean1;
3056 }
3057
3058 /*
3059 * register with the major number, or get a dynamic major number
3060 * by passing 0 as argument. This is done for greater than
3061 * 8 controller support.
3062 */
3063 if (i < MAX_CTLR_ORIG)
3064 hba[i]->major = MAJOR_NR + i;
3065 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3066 if(rc == -EBUSY || rc == -EINVAL) {
3067 printk(KERN_ERR
3068 "cciss: Unable to get major number %d for %s "
3069 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3070 goto clean1;
3071 }
3072 else {
3073 if (i >= MAX_CTLR_ORIG)
3074 hba[i]->major = rc;
3075 }
3076
3077 /* make sure the board interrupts are off */
3078 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3079 if( request_irq(hba[i]->intr, do_cciss_intr,
3080 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3081 hba[i]->devname, hba[i])) {
3082 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3083 hba[i]->intr, hba[i]->devname);
3084 goto clean2;
3085 }
3086 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3087 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3088 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3089 &(hba[i]->cmd_pool_dhandle));
3090 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3091 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3092 &(hba[i]->errinfo_pool_dhandle));
3093 if((hba[i]->cmd_pool_bits == NULL)
3094 || (hba[i]->cmd_pool == NULL)
3095 || (hba[i]->errinfo_pool == NULL)) {
3096 printk( KERN_ERR "cciss: out of memory");
3097 goto clean4;
3098 }
3099 #ifdef CONFIG_CISS_SCSI_TAPE
3100 hba[i]->scsi_rejects.complete =
3101 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3102 (NR_CMDS + 5), GFP_KERNEL);
3103 if (hba[i]->scsi_rejects.complete == NULL) {
3104 printk( KERN_ERR "cciss: out of memory");
3105 goto clean4;
3106 }
3107 #endif
3108 spin_lock_init(&hba[i]->lock);
3109
3110 /* Initialize the pdev driver private data;
3111 have it point to hba[i]. */
3112 pci_set_drvdata(pdev, hba[i]);
3113 /* command and error info recs zeroed out before
3114 they are used */
3115 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3116
3117 #ifdef CCISS_DEBUG
3118 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3119 #endif /* CCISS_DEBUG */
3120
3121 cciss_getgeometry(i);
3122
3123 cciss_scsi_setup(i);
3124
3125 /* Turn the interrupts on so we can service requests */
3126 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3127
3128 cciss_procinit(i);
3129 hba[i]->busy_initializing = 0;
3130
3131 for(j=0; j < NWD; j++) { /* mfm */
3132 drive_info_struct *drv = &(hba[i]->drv[j]);
3133 struct gendisk *disk = hba[i]->gendisk[j];
3134
3135 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3136 if (!q) {
3137 printk(KERN_ERR
3138 "cciss: unable to allocate queue for disk %d\n",
3139 j);
3140 break;
3141 }
3142 drv->queue = q;
3143
3144 q->backing_dev_info.ra_pages = READ_AHEAD;
3145 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3146
3147 /* This is a hardware imposed limit. */
3148 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3149
3150 /* This is a limit in the driver and could be eliminated. */
3151 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3152
3153 blk_queue_max_sectors(q, 512);
3154
3155 q->queuedata = hba[i];
3156 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3157 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3158 disk->major = hba[i]->major;
3159 disk->first_minor = j << NWD_SHIFT;
3160 disk->fops = &cciss_fops;
3161 disk->queue = q;
3162 disk->private_data = drv;
3163 /* we must register the controller even if no disks exist */
3164 /* this is for the online array utilities */
3165 if(!drv->heads && j)
3166 continue;
3167 blk_queue_hardsect_size(q, drv->block_size);
3168 set_capacity(disk, drv->nr_blocks);
3169 add_disk(disk);
3170 }
3171
3172 return(1);
3173
3174 clean4:
3175 #ifdef CONFIG_CISS_SCSI_TAPE
3176 if(hba[i]->scsi_rejects.complete)
3177 kfree(hba[i]->scsi_rejects.complete);
3178 #endif
3179 kfree(hba[i]->cmd_pool_bits);
3180 if(hba[i]->cmd_pool)
3181 pci_free_consistent(hba[i]->pdev,
3182 NR_CMDS * sizeof(CommandList_struct),
3183 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3184 if(hba[i]->errinfo_pool)
3185 pci_free_consistent(hba[i]->pdev,
3186 NR_CMDS * sizeof( ErrorInfo_struct),
3187 hba[i]->errinfo_pool,
3188 hba[i]->errinfo_pool_dhandle);
3189 free_irq(hba[i]->intr, hba[i]);
3190 clean2:
3191 unregister_blkdev(hba[i]->major, hba[i]->devname);
3192 clean1:
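/* free_hba() clears the hba[] slot and frees the structure, so any use
 * of hba[i] must come before it. */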
3193 hba[i]->busy_initializing = 0;
3194 release_io_mem(hba[i]);
3195 free_hba(i);
3196 return(-1);
3197 }
3198
3199 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3200 {
3201 ctlr_info_t *tmp_ptr;
3202 int i, j;
3203 char flush_buf[4];
3204 int return_code;
3205
3206 if (pci_get_drvdata(pdev) == NULL)
3207 {
3208 printk( KERN_ERR "cciss: Unable to remove device \n");
3209 return;
3210 }
3211 tmp_ptr = pci_get_drvdata(pdev);
3212 i = tmp_ptr->ctlr;
3213 if (hba[i] == NULL)
3214 {
3215 printk(KERN_ERR "cciss: device appears to "
3216 "already be removed \n");
3217 return;
3218 }
3219 /* Turn board interrupts off and send the flush cache command */
3220 /* sendcmd will turn off interrupts and send the flush
3221 * to write all data in the battery-backed cache to disk */
3222 memset(flush_buf, 0, 4);
3223 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3224 TYPE_CMD);
3225 if(return_code != IO_OK)
3226 {
3227 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3228 i);
3229 }
3230 free_irq(hba[i]->intr, hba[i]);
3231 pci_set_drvdata(pdev, NULL);
3232 iounmap(hba[i]->vaddr);
3233 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3234 unregister_blkdev(hba[i]->major, hba[i]->devname);
3235 remove_proc_entry(hba[i]->devname, proc_cciss);
3236
3237 /* remove it from the disk list */
3238 for (j = 0; j < NWD; j++) {
3239 struct gendisk *disk = hba[i]->gendisk[j];
3240 if (disk) {
3241 request_queue_t *q = disk->queue;
3242
3243 if (disk->flags & GENHD_FL_UP)
3244 del_gendisk(disk);
3245 if (q)
3246 blk_cleanup_queue(q);
3247 }
3248 }
3249
3250 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3251 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3252 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3253 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3254 kfree(hba[i]->cmd_pool_bits);
3255 #ifdef CONFIG_CISS_SCSI_TAPE
3256 kfree(hba[i]->scsi_rejects.complete);
3257 #endif
3258 release_io_mem(hba[i]);
3259 free_hba(i);
3260 }
3261
3262 static struct pci_driver cciss_pci_driver = {
3263 .name = "cciss",
3264 .probe = cciss_init_one,
3265 .remove = __devexit_p(cciss_remove_one),
3266 .id_table = cciss_pci_device_id, /* id_table */
3267 };
3268
3269 /*
3270 * This is it. Register the PCI driver information for the cards we control
3271 * the OS will call our registered routines when it finds one of our cards.
3272 */
3273 static int __init cciss_init(void)
3274 {
3275 printk(KERN_INFO DRIVER_NAME "\n");
3276
3277 /* Register for our PCI devices */
3278 return pci_module_init(&cciss_pci_driver);
3279 }
3280
3281 static void __exit cciss_cleanup(void)
3282 {
3283 int i;
3284
3285 pci_unregister_driver(&cciss_pci_driver);
3286 /* double check that all controller entries have been removed */
3287 for (i=0; i< MAX_CTLR; i++)
3288 {
3289 if (hba[i] != NULL)
3290 {
3291 printk(KERN_WARNING "cciss: had to remove"
3292 " controller %d\n", i);
3293 cciss_remove_one(hba[i]->pdev);
3294 }
3295 }
3296 remove_proc_entry("cciss", proc_root_driver);
3297 }
3298
3299 static void fail_all_cmds(unsigned long ctlr)
3300 {
3301 /* If we get here, the board is apparently dead. */
3302 ctlr_info_t *h = hba[ctlr];
3303 CommandList_struct *c;
3304 unsigned long flags;
3305
3306 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3307 h->alive = 0; /* the controller apparently died... */
3308
3309 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3310
3311 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3312
3313 /* move everything off the request queue onto the completed queue */
3314 while( (c = h->reqQ) != NULL ) {
3315 removeQ(&(h->reqQ), c);
3316 h->Qdepth--;
3317 addQ (&(h->cmpQ), c);
3318 }
3319
3320 /* Now, fail everything on the completed queue with a HW error */
3321 while( (c = h->cmpQ) != NULL ) {
3322 removeQ(&h->cmpQ, c);
3323 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3324 if (c->cmd_type == CMD_RWREQ) {
3325 complete_command(h, c, 0);
3326 } else if (c->cmd_type == CMD_IOCTL_PEND)
3327 complete(c->waiting);
3328 #ifdef CONFIG_CISS_SCSI_TAPE
3329 else if (c->cmd_type == CMD_SCSI)
3330 complete_scsi_command(c, 0, 0);
3331 #endif
3332 }
3333 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3334 return;
3335 }
3336
3337 module_init(cciss_init);
3338 module_exit(cciss_cleanup);