1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48
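/* Descriptive note (added): the macro below packs the driver version into a
 * single 32-bit value - major in bits 23..16, minor in bits 15..8 and
 * subminor in bits 7..0 - so 3.6.10 encodes as 0x0003060A. */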
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
52
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i E500");
58 MODULE_LICENSE("GPL");
59
60 #include "cciss_cmd.h"
61 #include "cciss.h"
62 #include <linux/cciss_ioctl.h>
63
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
75 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
85 {0,}
86 };
87
88 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
89
90 /* board_id = Subsystem Device ID & Vendor ID
91 * product = Marketing Name for the board
92 * access = Address of the struct of function pointers
93 */
94 static struct board_type products[] = {
95 {0x40700E11, "Smart Array 5300", &SA5_access},
96 {0x40800E11, "Smart Array 5i", &SA5B_access},
97 {0x40820E11, "Smart Array 532", &SA5B_access},
98 {0x40830E11, "Smart Array 5312", &SA5B_access},
99 {0x409A0E11, "Smart Array 641", &SA5_access},
100 {0x409B0E11, "Smart Array 642", &SA5_access},
101 {0x409C0E11, "Smart Array 6400", &SA5_access},
102 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
103 {0x40910E11, "Smart Array 6i", &SA5_access},
104 {0x3225103C, "Smart Array P600", &SA5_access},
105 {0x3223103C, "Smart Array P800", &SA5_access},
106 {0x3234103C, "Smart Array P400", &SA5_access},
107 {0x3235103C, "Smart Array P400i", &SA5_access},
108 {0x3211103C, "Smart Array E200i", &SA5_access},
109 {0x3212103C, "Smart Array E200", &SA5_access},
110 {0x3213103C, "Smart Array E200i", &SA5_access},
111 {0x3214103C, "Smart Array E200i", &SA5_access},
112 {0x3215103C, "Smart Array E200i", &SA5_access},
113 {0x3233103C, "Smart Array E500", &SA5_access},
114 };
115
116 /* How long to wait (in milliseconds) for board to go into simple mode */
117 #define MAX_CONFIG_WAIT 30000
118 #define MAX_IOCTL_CONFIG_WAIT 1000
119
120 /* define how many times we will try a command because of bus resets */
121 #define MAX_CMD_RETRIES 3
122
123 #define READ_AHEAD 1024
124 #define NR_CMDS 384 /* #commands that can be outstanding */
125 #define MAX_CTLR 32
126
127 /* Originally the cciss driver only supported 8 major numbers */
128 #define MAX_CTLR_ORIG 8
129
130 static ctlr_info_t *hba[MAX_CTLR];
131
132 static void do_cciss_request(request_queue_t *q);
133 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
134 static int cciss_open(struct inode *inode, struct file *filep);
135 static int cciss_release(struct inode *inode, struct file *filep);
136 static int cciss_ioctl(struct inode *inode, struct file *filep,
137 unsigned int cmd, unsigned long arg);
138 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
139
140 static int revalidate_allvol(ctlr_info_t *host);
141 static int cciss_revalidate(struct gendisk *disk);
142 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
143 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
144 int clear_all);
145
146 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
147 sector_t *total_size, unsigned int *block_size);
148 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
149 sector_t *total_size, unsigned int *block_size);
150 static void cciss_geometry_inquiry(int ctlr, int logvol,
151 int withirq, sector_t total_size,
152 unsigned int block_size, InquiryData_struct *inq_buff,
153 drive_info_struct *drv);
154 static void cciss_getgeometry(int cntl_num);
155 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
156 __u32);
157 static void start_io(ctlr_info_t *h);
158 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
159 unsigned int use_unit_num, unsigned int log_unit,
160 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
161 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
162 unsigned int use_unit_num, unsigned int log_unit,
163 __u8 page_code, int cmd_type);
164
165 static void fail_all_cmds(unsigned long ctlr);
166
167 #ifdef CONFIG_PROC_FS
168 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
169 int length, int *eof, void *data);
170 static void cciss_procinit(int i);
171 #else
172 static void cciss_procinit(int i)
173 {
174 }
175 #endif /* CONFIG_PROC_FS */
176
177 #ifdef CONFIG_COMPAT
178 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
179 #endif
180
181 static struct block_device_operations cciss_fops = {
182 .owner = THIS_MODULE,
183 .open = cciss_open,
184 .release = cciss_release,
185 .ioctl = cciss_ioctl,
186 .getgeo = cciss_getgeo,
187 #ifdef CONFIG_COMPAT
188 .compat_ioctl = cciss_compat_ioctl,
189 #endif
190 .revalidate_disk = cciss_revalidate,
191 };
192
193 /*
194 * Enqueuing and dequeuing functions for cmdlists.
195 */
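/* Descriptive note (added): the queue is a circular doubly-linked list
 * threaded through CommandList_struct->next/prev, with *Qptr pointing at
 * the head. addQ() links a command in just before the head (i.e. at the
 * tail); removeQ() unlinks the given command, advancing the head if it was
 * removed, and clears the head pointer when the last element goes away. */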
196 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
197 {
198 if (*Qptr == NULL) {
199 *Qptr = c;
200 c->next = c->prev = c;
201 } else {
202 c->prev = (*Qptr)->prev;
203 c->next = (*Qptr);
204 (*Qptr)->prev->next = c;
205 (*Qptr)->prev = c;
206 }
207 }
208
209 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
210 CommandList_struct *c)
211 {
212 if (c && c->next != c) {
213 if (*Qptr == c)
214 *Qptr = c->next;
215 c->prev->next = c->next;
216 c->next->prev = c->prev;
217 } else {
218 *Qptr = NULL;
219 }
220 return c;
221 }
222
223 #include "cciss_scsi.c" /* For SCSI tape support */
224
225 #ifdef CONFIG_PROC_FS
226
227 /*
228 * Report information about this controller.
229 */
230 #define ENG_GIG 1000000000
231 #define ENG_GIG_FACTOR (ENG_GIG/512)
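/* Descriptive note (added): ENG_GIG_FACTOR is 10^9 bytes expressed in
 * 512-byte sectors, so dividing a sector count by it yields the volume
 * size in decimal gigabytes for the /proc report below. */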
232 #define RAID_UNKNOWN 6
233 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
234 "UNKNOWN"
235 };
236
237 static struct proc_dir_entry *proc_cciss;
238
239 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
240 int length, int *eof, void *data)
241 {
242 off_t pos = 0;
243 off_t len = 0;
244 int size, i, ctlr;
245 ctlr_info_t *h = (ctlr_info_t *) data;
246 drive_info_struct *drv;
247 unsigned long flags;
248 sector_t vol_sz, vol_sz_frac;
249
250 ctlr = h->ctlr;
251
252 /* prevent displaying bogus info during configuration
253 * or deconfiguration of a logical volume
254 */
255 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
256 if (h->busy_configuring) {
257 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
258 return -EBUSY;
259 }
260 h->busy_configuring = 1;
261 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
262
263 size = sprintf(buffer, "%s: HP %s Controller\n"
264 "Board ID: 0x%08lx\n"
265 "Firmware Version: %c%c%c%c\n"
266 "IRQ: %d\n"
267 "Logical drives: %d\n"
268 "Current Q depth: %d\n"
269 "Current # commands on controller: %d\n"
270 "Max Q depth since init: %d\n"
271 "Max # commands on controller since init: %d\n"
272 "Max SG entries since init: %d\n\n",
273 h->devname,
274 h->product_name,
275 (unsigned long)h->board_id,
276 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
277 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
278 h->num_luns, h->Qdepth, h->commands_outstanding,
279 h->maxQsinceinit, h->max_outstanding, h->maxSG);
280
281 pos += size;
282 len += size;
283 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
284 for (i = 0; i <= h->highest_lun; i++) {
285
286 drv = &h->drv[i];
287 if (drv->heads == 0)
288 continue;
289
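/* Descriptive note (added): convert the size in 512-byte sectors into whole
 * gigabytes plus a two-digit fraction; sector_div() divides its first
 * argument in place and returns the remainder. */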
290 vol_sz = drv->nr_blocks;
291 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
292 vol_sz_frac *= 100;
293 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
294
295 if (drv->raid_level > 5)
296 drv->raid_level = RAID_UNKNOWN;
297 size = sprintf(buffer + len, "cciss/c%dd%d:"
298 "\t%4u.%02uGB\tRAID %s\n",
299 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
300 raid_label[drv->raid_level]);
301 pos += size;
302 len += size;
303 }
304
305 *eof = 1;
306 *start = buffer + offset;
307 len -= offset;
308 if (len > length)
309 len = length;
310 h->busy_configuring = 0;
311 return len;
312 }
313
314 static int
315 cciss_proc_write(struct file *file, const char __user *buffer,
316 unsigned long count, void *data)
317 {
318 unsigned char cmd[80];
319 int len;
320 #ifdef CONFIG_CISS_SCSI_TAPE
321 ctlr_info_t *h = (ctlr_info_t *) data;
322 int rc;
323 #endif
324
325 if (count > sizeof(cmd) - 1)
326 return -EINVAL;
327 if (copy_from_user(cmd, buffer, count))
328 return -EFAULT;
329 cmd[count] = '\0';
330 len = strlen(cmd); // above 3 lines ensure safety
331 if (len && cmd[len - 1] == '\n')
332 cmd[--len] = '\0';
333 # ifdef CONFIG_CISS_SCSI_TAPE
334 if (strcmp("engage scsi", cmd) == 0) {
335 rc = cciss_engage_scsi(h->ctlr);
336 if (rc != 0)
337 return -rc;
338 return count;
339 }
340 /* might be nice to have "disengage" too, but it's not
341 safely possible. (only 1 module use count, lock issues.) */
342 # endif
343 return -EINVAL;
344 }
345
346 /*
347 * Get us a file in /proc/cciss that says something about each controller.
348 * Create /proc/cciss if it doesn't exist yet.
349 */
350 static void __devinit cciss_procinit(int i)
351 {
352 struct proc_dir_entry *pde;
353
354 if (proc_cciss == NULL) {
355 proc_cciss = proc_mkdir("cciss", proc_root_driver);
356 if (!proc_cciss)
357 return;
358 }
359
360 pde = create_proc_read_entry(hba[i]->devname,
361 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
362 proc_cciss, cciss_proc_get_info, hba[i]);
363 pde->write_proc = cciss_proc_write;
364 }
365 #endif /* CONFIG_PROC_FS */
366
367 /*
368 * For operations that cannot sleep, a command block is allocated at init,
369 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
370 * which ones are free or in use. For operations that can wait for kmalloc
371 * to possibly sleep, this routine can be called with get_from_pool set to 0.
372 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
373 */
374 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
375 {
376 CommandList_struct *c;
377 int i;
378 u64bit temp64;
379 dma_addr_t cmd_dma_handle, err_dma_handle;
380
381 if (!get_from_pool) {
382 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
383 sizeof(CommandList_struct), &cmd_dma_handle);
384 if (c == NULL)
385 return NULL;
386 memset(c, 0, sizeof(CommandList_struct));
387
388 c->cmdindex = -1;
389
390 c->err_info = (ErrorInfo_struct *)
391 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
392 &err_dma_handle);
393
394 if (c->err_info == NULL) {
395 pci_free_consistent(h->pdev,
396 sizeof(CommandList_struct), c, cmd_dma_handle);
397 return NULL;
398 }
399 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
400 } else { /* get it out of the controller's pool */
401
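/* Descriptive note (added): scan the command-pool bitmap for a free slot and
 * try to claim it atomically with test_and_set_bit(); if another CPU grabs
 * the same slot first, loop back and look for the next free one. */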
402 do {
403 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
404 if (i == NR_CMDS)
405 return NULL;
406 } while (test_and_set_bit
407 (i & (BITS_PER_LONG - 1),
408 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
409 #ifdef CCISS_DEBUG
410 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
411 #endif
412 c = h->cmd_pool + i;
413 memset(c, 0, sizeof(CommandList_struct));
414 cmd_dma_handle = h->cmd_pool_dhandle
415 + i * sizeof(CommandList_struct);
416 c->err_info = h->errinfo_pool + i;
417 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
418 err_dma_handle = h->errinfo_pool_dhandle
419 + i * sizeof(ErrorInfo_struct);
420 h->nr_allocs++;
421
422 c->cmdindex = i;
423 }
424
425 c->busaddr = (__u32) cmd_dma_handle;
426 temp64.val = (__u64) err_dma_handle;
427 c->ErrDesc.Addr.lower = temp64.val32.lower;
428 c->ErrDesc.Addr.upper = temp64.val32.upper;
429 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
430
431 c->ctlr = h->ctlr;
432 return c;
433 }
434
435 /*
436 * Frees a command block that was previously allocated with cmd_alloc().
437 */
438 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
439 {
440 int i;
441 u64bit temp64;
442
443 if (!got_from_pool) {
444 temp64.val32.lower = c->ErrDesc.Addr.lower;
445 temp64.val32.upper = c->ErrDesc.Addr.upper;
446 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
447 c->err_info, (dma_addr_t) temp64.val);
448 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
449 c, (dma_addr_t) c->busaddr);
450 } else {
451 i = c - h->cmd_pool;
452 clear_bit(i & (BITS_PER_LONG - 1),
453 h->cmd_pool_bits + (i / BITS_PER_LONG));
454 h->nr_frees++;
455 }
456 }
457
458 static inline ctlr_info_t *get_host(struct gendisk *disk)
459 {
460 return disk->queue->queuedata;
461 }
462
463 static inline drive_info_struct *get_drv(struct gendisk *disk)
464 {
465 return disk->private_data;
466 }
467
468 /*
469 * Open. Make sure the device is really there.
470 */
471 static int cciss_open(struct inode *inode, struct file *filep)
472 {
473 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
474 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
475
476 #ifdef CCISS_DEBUG
477 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
478 #endif /* CCISS_DEBUG */
479
480 if (host->busy_initializing || drv->busy_configuring)
481 return -EBUSY;
482 /*
483 * Root is allowed to open raw volume zero even if it's not configured
484 * so array config can still work. Root is also allowed to open any
485 * volume that has a LUN ID, so it can issue IOCTL to reread the
486 * disk information. I don't think I really like this,
487 * but I'm already using way too many device nodes to claim another one
488 * for "raw controller".
489 */
490 if (drv->nr_blocks == 0) {
491 if (iminor(inode) != 0) { /* not node 0? */
492 /* if not node 0 make sure it is a partition = 0 */
493 if (iminor(inode) & 0x0f) {
494 return -ENXIO;
495 /* if it is, make sure we have a LUN ID */
496 } else if (drv->LunID == 0) {
497 return -ENXIO;
498 }
499 }
500 if (!capable(CAP_SYS_ADMIN))
501 return -EPERM;
502 }
503 drv->usage_count++;
504 host->usage_count++;
505 return 0;
506 }
507
508 /*
509 * Close. Sync first.
510 */
511 static int cciss_release(struct inode *inode, struct file *filep)
512 {
513 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
514 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
515
516 #ifdef CCISS_DEBUG
517 printk(KERN_DEBUG "cciss_release %s\n",
518 inode->i_bdev->bd_disk->disk_name);
519 #endif /* CCISS_DEBUG */
520
521 drv->usage_count--;
522 host->usage_count--;
523 return 0;
524 }
525
526 #ifdef CONFIG_COMPAT
527
528 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
529 {
530 int ret;
531 lock_kernel();
532 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
533 unlock_kernel();
534 return ret;
535 }
536
537 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
538 unsigned long arg);
539 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
540 unsigned long arg);
541
542 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
543 {
544 switch (cmd) {
545 case CCISS_GETPCIINFO:
546 case CCISS_GETINTINFO:
547 case CCISS_SETINTINFO:
548 case CCISS_GETNODENAME:
549 case CCISS_SETNODENAME:
550 case CCISS_GETHEARTBEAT:
551 case CCISS_GETBUSTYPES:
552 case CCISS_GETFIRMVER:
553 case CCISS_GETDRIVVER:
554 case CCISS_REVALIDVOLS:
555 case CCISS_DEREGDISK:
556 case CCISS_REGNEWDISK:
557 case CCISS_REGNEWD:
558 case CCISS_RESCANDISK:
559 case CCISS_GETLUNINFO:
560 return do_ioctl(f, cmd, arg);
561
562 case CCISS_PASSTHRU32:
563 return cciss_ioctl32_passthru(f, cmd, arg);
564 case CCISS_BIG_PASSTHRU32:
565 return cciss_ioctl32_big_passthru(f, cmd, arg);
566
567 default:
568 return -ENOIOCTLCMD;
569 }
570 }
571
572 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
573 unsigned long arg)
574 {
575 IOCTL32_Command_struct __user *arg32 =
576 (IOCTL32_Command_struct __user *) arg;
577 IOCTL_Command_struct arg64;
578 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
579 int err;
580 u32 cp;
581
582 err = 0;
583 err |=
584 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
585 sizeof(arg64.LUN_info));
586 err |=
587 copy_from_user(&arg64.Request, &arg32->Request,
588 sizeof(arg64.Request));
589 err |=
590 copy_from_user(&arg64.error_info, &arg32->error_info,
591 sizeof(arg64.error_info));
592 err |= get_user(arg64.buf_size, &arg32->buf_size);
593 err |= get_user(cp, &arg32->buf);
594 arg64.buf = compat_ptr(cp);
595 err |= copy_to_user(p, &arg64, sizeof(arg64));
596
597 if (err)
598 return -EFAULT;
599
600 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
601 if (err)
602 return err;
603 err |=
604 copy_in_user(&arg32->error_info, &p->error_info,
605 sizeof(arg32->error_info));
606 if (err)
607 return -EFAULT;
608 return err;
609 }
610
611 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
612 unsigned long arg)
613 {
614 BIG_IOCTL32_Command_struct __user *arg32 =
615 (BIG_IOCTL32_Command_struct __user *) arg;
616 BIG_IOCTL_Command_struct arg64;
617 BIG_IOCTL_Command_struct __user *p =
618 compat_alloc_user_space(sizeof(arg64));
619 int err;
620 u32 cp;
621
622 err = 0;
623 err |=
624 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
625 sizeof(arg64.LUN_info));
626 err |=
627 copy_from_user(&arg64.Request, &arg32->Request,
628 sizeof(arg64.Request));
629 err |=
630 copy_from_user(&arg64.error_info, &arg32->error_info,
631 sizeof(arg64.error_info));
632 err |= get_user(arg64.buf_size, &arg32->buf_size);
633 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
634 err |= get_user(cp, &arg32->buf);
635 arg64.buf = compat_ptr(cp);
636 err |= copy_to_user(p, &arg64, sizeof(arg64));
637
638 if (err)
639 return -EFAULT;
640
641 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
642 if (err)
643 return err;
644 err |=
645 copy_in_user(&arg32->error_info, &p->error_info,
646 sizeof(arg32->error_info));
647 if (err)
648 return -EFAULT;
649 return err;
650 }
651 #endif
652
653 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
654 {
655 drive_info_struct *drv = get_drv(bdev->bd_disk);
656
657 if (!drv->cylinders)
658 return -ENXIO;
659
660 geo->heads = drv->heads;
661 geo->sectors = drv->sectors;
662 geo->cylinders = drv->cylinders;
663 return 0;
664 }
665
666 /*
667 * ioctl
668 */
669 static int cciss_ioctl(struct inode *inode, struct file *filep,
670 unsigned int cmd, unsigned long arg)
671 {
672 struct block_device *bdev = inode->i_bdev;
673 struct gendisk *disk = bdev->bd_disk;
674 ctlr_info_t *host = get_host(disk);
675 drive_info_struct *drv = get_drv(disk);
676 int ctlr = host->ctlr;
677 void __user *argp = (void __user *)arg;
678
679 #ifdef CCISS_DEBUG
680 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
681 #endif /* CCISS_DEBUG */
682
683 switch (cmd) {
684 case CCISS_GETPCIINFO:
685 {
686 cciss_pci_info_struct pciinfo;
687
688 if (!arg)
689 return -EINVAL;
690 pciinfo.domain = pci_domain_nr(host->pdev->bus);
691 pciinfo.bus = host->pdev->bus->number;
692 pciinfo.dev_fn = host->pdev->devfn;
693 pciinfo.board_id = host->board_id;
694 if (copy_to_user
695 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
696 return -EFAULT;
697 return 0;
698 }
699 case CCISS_GETINTINFO:
700 {
701 cciss_coalint_struct intinfo;
702 if (!arg)
703 return -EINVAL;
704 intinfo.delay =
705 readl(&host->cfgtable->HostWrite.CoalIntDelay);
706 intinfo.count =
707 readl(&host->cfgtable->HostWrite.CoalIntCount);
708 if (copy_to_user
709 (argp, &intinfo, sizeof(cciss_coalint_struct)))
710 return -EFAULT;
711 return 0;
712 }
713 case CCISS_SETINTINFO:
714 {
715 cciss_coalint_struct intinfo;
716 unsigned long flags;
717 int i;
718
719 if (!arg)
720 return -EINVAL;
721 if (!capable(CAP_SYS_ADMIN))
722 return -EPERM;
723 if (copy_from_user
724 (&intinfo, argp, sizeof(cciss_coalint_struct)))
725 return -EFAULT;
726 if ((intinfo.delay == 0) && (intinfo.count == 0))
727 {
728 // printk("cciss_ioctl: delay and count cannot be 0\n");
729 return -EINVAL;
730 }
731 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
732 /* Update the field, and then ring the doorbell */
733 writel(intinfo.delay,
734 &(host->cfgtable->HostWrite.CoalIntDelay));
735 writel(intinfo.count,
736 &(host->cfgtable->HostWrite.CoalIntCount));
737 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
738
739 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
740 if (!(readl(host->vaddr + SA5_DOORBELL)
741 & CFGTBL_ChangeReq))
742 break;
743 /* delay and try again */
744 udelay(1000);
745 }
746 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
747 if (i >= MAX_IOCTL_CONFIG_WAIT)
748 return -EAGAIN;
749 return 0;
750 }
751 case CCISS_GETNODENAME:
752 {
753 NodeName_type NodeName;
754 int i;
755
756 if (!arg)
757 return -EINVAL;
758 for (i = 0; i < 16; i++)
759 NodeName[i] =
760 readb(&host->cfgtable->ServerName[i]);
761 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
762 return -EFAULT;
763 return 0;
764 }
765 case CCISS_SETNODENAME:
766 {
767 NodeName_type NodeName;
768 unsigned long flags;
769 int i;
770
771 if (!arg)
772 return -EINVAL;
773 if (!capable(CAP_SYS_ADMIN))
774 return -EPERM;
775
776 if (copy_from_user
777 (NodeName, argp, sizeof(NodeName_type)))
778 return -EFAULT;
779
780 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
781
782 /* Update the field, and then ring the doorbell */
783 for (i = 0; i < 16; i++)
784 writeb(NodeName[i],
785 &host->cfgtable->ServerName[i]);
786
787 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
788
789 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
790 if (!(readl(host->vaddr + SA5_DOORBELL)
791 & CFGTBL_ChangeReq))
792 break;
793 /* delay and try again */
794 udelay(1000);
795 }
796 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
797 if (i >= MAX_IOCTL_CONFIG_WAIT)
798 return -EAGAIN;
799 return 0;
800 }
801
802 case CCISS_GETHEARTBEAT:
803 {
804 Heartbeat_type heartbeat;
805
806 if (!arg)
807 return -EINVAL;
808 heartbeat = readl(&host->cfgtable->HeartBeat);
809 if (copy_to_user
810 (argp, &heartbeat, sizeof(Heartbeat_type)))
811 return -EFAULT;
812 return 0;
813 }
814 case CCISS_GETBUSTYPES:
815 {
816 BusTypes_type BusTypes;
817
818 if (!arg)
819 return -EINVAL;
820 BusTypes = readl(&host->cfgtable->BusTypes);
821 if (copy_to_user
822 (argp, &BusTypes, sizeof(BusTypes_type)))
823 return -EFAULT;
824 return 0;
825 }
826 case CCISS_GETFIRMVER:
827 {
828 FirmwareVer_type firmware;
829
830 if (!arg)
831 return -EINVAL;
832 memcpy(firmware, host->firm_ver, 4);
833
834 if (copy_to_user
835 (argp, firmware, sizeof(FirmwareVer_type)))
836 return -EFAULT;
837 return 0;
838 }
839 case CCISS_GETDRIVVER:
840 {
841 DriverVer_type DriverVer = DRIVER_VERSION;
842
843 if (!arg)
844 return -EINVAL;
845
846 if (copy_to_user
847 (argp, &DriverVer, sizeof(DriverVer_type)))
848 return -EFAULT;
849 return 0;
850 }
851
852 case CCISS_REVALIDVOLS:
853 if (bdev != bdev->bd_contains || drv != host->drv)
854 return -ENXIO;
855 return revalidate_allvol(host);
856
857 case CCISS_GETLUNINFO:{
858 LogvolInfo_struct luninfo;
859
860 luninfo.LunID = drv->LunID;
861 luninfo.num_opens = drv->usage_count;
862 luninfo.num_parts = 0;
863 if (copy_to_user(argp, &luninfo,
864 sizeof(LogvolInfo_struct)))
865 return -EFAULT;
866 return 0;
867 }
868 case CCISS_DEREGDISK:
869 return rebuild_lun_table(host, disk);
870
871 case CCISS_REGNEWD:
872 return rebuild_lun_table(host, NULL);
873
874 case CCISS_PASSTHRU:
875 {
876 IOCTL_Command_struct iocommand;
877 CommandList_struct *c;
878 char *buff = NULL;
879 u64bit temp64;
880 unsigned long flags;
881 DECLARE_COMPLETION_ONSTACK(wait);
882
883 if (!arg)
884 return -EINVAL;
885
886 if (!capable(CAP_SYS_RAWIO))
887 return -EPERM;
888
889 if (copy_from_user
890 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
891 return -EFAULT;
892 if ((iocommand.buf_size < 1) &&
893 (iocommand.Request.Type.Direction != XFER_NONE)) {
894 return -EINVAL;
895 }
896 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
897 /* Check kmalloc limits */
898 if (iocommand.buf_size > 128000)
899 return -EINVAL;
900 #endif
901 if (iocommand.buf_size > 0) {
902 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
903 if (buff == NULL)
904 return -EFAULT;
905 }
906 if (iocommand.Request.Type.Direction == XFER_WRITE) {
907 /* Copy the data into the buffer we created */
908 if (copy_from_user
909 (buff, iocommand.buf, iocommand.buf_size)) {
910 kfree(buff);
911 return -EFAULT;
912 }
913 } else {
914 memset(buff, 0, iocommand.buf_size);
915 }
916 if ((c = cmd_alloc(host, 0)) == NULL) {
917 kfree(buff);
918 return -ENOMEM;
919 }
920 // Fill in the command type
921 c->cmd_type = CMD_IOCTL_PEND;
922 // Fill in Command Header
923 c->Header.ReplyQueue = 0; // unused in simple mode
924 if (iocommand.buf_size > 0) // buffer to fill
925 {
926 c->Header.SGList = 1;
927 c->Header.SGTotal = 1;
928 } else // no buffers to fill
929 {
930 c->Header.SGList = 0;
931 c->Header.SGTotal = 0;
932 }
933 c->Header.LUN = iocommand.LUN_info;
934 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for the tag
935
936 // Fill in Request block
937 c->Request = iocommand.Request;
938
939 // Fill in the scatter gather information
940 if (iocommand.buf_size > 0) {
941 temp64.val = pci_map_single(host->pdev, buff,
942 iocommand.buf_size,
943 PCI_DMA_BIDIRECTIONAL);
944 c->SG[0].Addr.lower = temp64.val32.lower;
945 c->SG[0].Addr.upper = temp64.val32.upper;
946 c->SG[0].Len = iocommand.buf_size;
947 c->SG[0].Ext = 0; // we are not chaining
948 }
949 c->waiting = &wait;
950
951 /* Put the request on the tail of the request queue */
952 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
953 addQ(&host->reqQ, c);
954 host->Qdepth++;
955 start_io(host);
956 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
957
958 wait_for_completion(&wait);
959
960 /* unlock the buffers from DMA */
961 temp64.val32.lower = c->SG[0].Addr.lower;
962 temp64.val32.upper = c->SG[0].Addr.upper;
963 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
964 iocommand.buf_size,
965 PCI_DMA_BIDIRECTIONAL);
966
967 /* Copy the error information out */
968 iocommand.error_info = *(c->err_info);
969 if (copy_to_user
970 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
971 kfree(buff);
972 cmd_free(host, c, 0);
973 return -EFAULT;
974 }
975
976 if (iocommand.Request.Type.Direction == XFER_READ) {
977 /* Copy the data out of the buffer we created */
978 if (copy_to_user
979 (iocommand.buf, buff, iocommand.buf_size)) {
980 kfree(buff);
981 cmd_free(host, c, 0);
982 return -EFAULT;
983 }
984 }
985 kfree(buff);
986 cmd_free(host, c, 0);
987 return 0;
988 }
989 case CCISS_BIG_PASSTHRU:{
990 BIG_IOCTL_Command_struct *ioc;
991 CommandList_struct *c;
992 unsigned char **buff = NULL;
993 int *buff_size = NULL;
994 u64bit temp64;
995 unsigned long flags;
996 BYTE sg_used = 0;
997 int status = 0;
998 int i;
999 DECLARE_COMPLETION_ONSTACK(wait);
1000 __u32 left;
1001 __u32 sz;
1002 BYTE __user *data_ptr;
1003
1004 if (!arg)
1005 return -EINVAL;
1006 if (!capable(CAP_SYS_RAWIO))
1007 return -EPERM;
1008 ioc = (BIG_IOCTL_Command_struct *)
1009 kmalloc(sizeof(*ioc), GFP_KERNEL);
1010 if (!ioc) {
1011 status = -ENOMEM;
1012 goto cleanup1;
1013 }
1014 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1015 status = -EFAULT;
1016 goto cleanup1;
1017 }
1018 if ((ioc->buf_size < 1) &&
1019 (ioc->Request.Type.Direction != XFER_NONE)) {
1020 status = -EINVAL;
1021 goto cleanup1;
1022 }
1023 /* Check kmalloc limits using all SGs */
1024 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1025 status = -EINVAL;
1026 goto cleanup1;
1027 }
1028 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1029 status = -EINVAL;
1030 goto cleanup1;
1031 }
1032 buff =
1033 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1034 if (!buff) {
1035 status = -ENOMEM;
1036 goto cleanup1;
1037 }
1038 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1039 GFP_KERNEL);
1040 if (!buff_size) {
1041 status = -ENOMEM;
1042 goto cleanup1;
1043 }
1044 left = ioc->buf_size;
1045 data_ptr = ioc->buf;
1046 while (left) {
1047 sz = (left >
1048 ioc->malloc_size) ? ioc->
1049 malloc_size : left;
1050 buff_size[sg_used] = sz;
1051 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1052 if (buff[sg_used] == NULL) {
1053 status = -ENOMEM;
1054 goto cleanup1;
1055 }
1056 if (ioc->Request.Type.Direction == XFER_WRITE) {
1057 if (copy_from_user
1058 (buff[sg_used], data_ptr, sz)) {
1059 status = -ENOMEM;
1060 goto cleanup1;
1061 }
1062 } else {
1063 memset(buff[sg_used], 0, sz);
1064 }
1065 left -= sz;
1066 data_ptr += sz;
1067 sg_used++;
1068 }
1069 if ((c = cmd_alloc(host, 0)) == NULL) {
1070 status = -ENOMEM;
1071 goto cleanup1;
1072 }
1073 c->cmd_type = CMD_IOCTL_PEND;
1074 c->Header.ReplyQueue = 0;
1075
1076 if (ioc->buf_size > 0) {
1077 c->Header.SGList = sg_used;
1078 c->Header.SGTotal = sg_used;
1079 } else {
1080 c->Header.SGList = 0;
1081 c->Header.SGTotal = 0;
1082 }
1083 c->Header.LUN = ioc->LUN_info;
1084 c->Header.Tag.lower = c->busaddr;
1085
1086 c->Request = ioc->Request;
1087 if (ioc->buf_size > 0) {
1088 int i;
1089 for (i = 0; i < sg_used; i++) {
1090 temp64.val =
1091 pci_map_single(host->pdev, buff[i],
1092 buff_size[i],
1093 PCI_DMA_BIDIRECTIONAL);
1094 c->SG[i].Addr.lower =
1095 temp64.val32.lower;
1096 c->SG[i].Addr.upper =
1097 temp64.val32.upper;
1098 c->SG[i].Len = buff_size[i];
1099 c->SG[i].Ext = 0; /* we are not chaining */
1100 }
1101 }
1102 c->waiting = &wait;
1103 /* Put the request on the tail of the request queue */
1104 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1105 addQ(&host->reqQ, c);
1106 host->Qdepth++;
1107 start_io(host);
1108 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1109 wait_for_completion(&wait);
1110 /* unlock the buffers from DMA */
1111 for (i = 0; i < sg_used; i++) {
1112 temp64.val32.lower = c->SG[i].Addr.lower;
1113 temp64.val32.upper = c->SG[i].Addr.upper;
1114 pci_unmap_single(host->pdev,
1115 (dma_addr_t) temp64.val, buff_size[i],
1116 PCI_DMA_BIDIRECTIONAL);
1117 }
1118 /* Copy the error information out */
1119 ioc->error_info = *(c->err_info);
1120 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1121 cmd_free(host, c, 0);
1122 status = -EFAULT;
1123 goto cleanup1;
1124 }
1125 if (ioc->Request.Type.Direction == XFER_READ) {
1126 /* Copy the data out of the buffer we created */
1127 BYTE __user *ptr = ioc->buf;
1128 for (i = 0; i < sg_used; i++) {
1129 if (copy_to_user
1130 (ptr, buff[i], buff_size[i])) {
1131 cmd_free(host, c, 0);
1132 status = -EFAULT;
1133 goto cleanup1;
1134 }
1135 ptr += buff_size[i];
1136 }
1137 }
1138 cmd_free(host, c, 0);
1139 status = 0;
1140 cleanup1:
1141 if (buff) {
1142 for (i = 0; i < sg_used; i++)
1143 kfree(buff[i]);
1144 kfree(buff);
1145 }
1146 kfree(buff_size);
1147 kfree(ioc);
1148 return status;
1149 }
1150 default:
1151 return -ENOTTY;
1152 }
1153 }
1154
1155 /*
1156 * revalidate_allvol is for online array config utilities. After a
1157 * utility reconfigures the drives in the array, it can use this function
1158 * (through an ioctl) to make the driver zap any previous disk structs for
1159 * that controller and get new ones.
1160 *
1161 * Right now I'm using the getgeometry() function to do this, but this
1162 * function should probably be finer grained and allow you to revalidate one
1163 * particular logical volume (instead of all of them on a particular
1164 * controller).
1165 */
1166 static int revalidate_allvol(ctlr_info_t *host)
1167 {
1168 int ctlr = host->ctlr, i;
1169 unsigned long flags;
1170
1171 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1172 if (host->usage_count > 1) {
1173 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1174 printk(KERN_WARNING "cciss: Device busy for volume"
1175 " revalidation (usage=%d)\n", host->usage_count);
1176 return -EBUSY;
1177 }
1178 host->usage_count++;
1179 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1180
1181 for (i = 0; i < NWD; i++) {
1182 struct gendisk *disk = host->gendisk[i];
1183 if (disk) {
1184 request_queue_t *q = disk->queue;
1185
1186 if (disk->flags & GENHD_FL_UP)
1187 del_gendisk(disk);
1188 if (q)
1189 blk_cleanup_queue(q);
1190 }
1191 }
1192
1193 /*
1194 * Set the partition and block size structures for all volumes
1195 * on this controller to zero. We will reread all of this data
1196 */
1197 memset(host->drv, 0, sizeof(drive_info_struct)
1198 * CISS_MAX_LUN);
1199 /*
1200 * Tell the array controller not to give us any interrupts while
1201 * we check the new geometry. Then turn interrupts back on when
1202 * we're done.
1203 */
1204 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1205 cciss_getgeometry(ctlr);
1206 host->access.set_intr_mask(host, CCISS_INTR_ON);
1207
1208 /* Loop through each real device */
1209 for (i = 0; i < NWD; i++) {
1210 struct gendisk *disk = host->gendisk[i];
1211 drive_info_struct *drv = &(host->drv[i]);
1212 /* we must register the controller even if no disks exist */
1213 /* this is for the online array utilities */
1214 if (!drv->heads && i)
1215 continue;
1216 blk_queue_hardsect_size(drv->queue, drv->block_size);
1217 set_capacity(disk, drv->nr_blocks);
1218 add_disk(disk);
1219 }
1220 host->usage_count--;
1221 return 0;
1222 }
1223
1224 static inline void complete_buffers(struct bio *bio, int status)
1225 {
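/* Descriptive note (added): walk the bio chain, completing each bio for its
 * full byte count; a nonzero status means success (error code 0), while a
 * zero status maps to -EIO. */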
1226 while (bio) {
1227 struct bio *xbh = bio->bi_next;
1228 int nr_sectors = bio_sectors(bio);
1229
1230 bio->bi_next = NULL;
1231 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1232 bio = xbh;
1233 }
1234 }
1235
1236 static void cciss_check_queues(ctlr_info_t *h)
1237 {
1238 int start_queue = h->next_to_run;
1239 int i;
1240
1241 /* check to see if we have maxed out the number of commands that can
1242 * be placed on the queue. If so then exit. We do this check here
1243 * in case the interrupt we serviced was from an ioctl and did not
1244 * free any new commands.
1245 */
1246 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1247 return;
1248
1249 /* We have room on the queue for more commands. Now we need to queue
1250 * them up. We will also keep track of the next queue to run so
1251 * that every queue gets a chance to be started first.
1252 */
1253 for (i = 0; i < h->highest_lun + 1; i++) {
1254 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1255 /* make sure the disk has been added and the drive is real
1256 * because this can be called from the middle of init_one.
1257 */
1258 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1259 continue;
1260 blk_start_queue(h->gendisk[curr_queue]->queue);
1261
1262 /* check to see if we have maxed out the number of commands
1263 * that can be placed on the queue.
1264 */
1265 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1266 if (curr_queue == start_queue) {
1267 h->next_to_run =
1268 (start_queue + 1) % (h->highest_lun + 1);
1269 break;
1270 } else {
1271 h->next_to_run = curr_queue;
1272 break;
1273 }
1274 } else {
1275 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1276 }
1277 }
1278 }
1279
1280 static void cciss_softirq_done(struct request *rq)
1281 {
1282 CommandList_struct *cmd = rq->completion_data;
1283 ctlr_info_t *h = hba[cmd->ctlr];
1284 unsigned long flags;
1285 u64bit temp64;
1286 int i, ddir;
1287
1288 if (cmd->Request.Type.Direction == XFER_READ)
1289 ddir = PCI_DMA_FROMDEVICE;
1290 else
1291 ddir = PCI_DMA_TODEVICE;
1292
1293 /* command did not need to be retried */
1294 /* unmap the DMA mapping for all the scatter gather elements */
1295 for (i = 0; i < cmd->Header.SGList; i++) {
1296 temp64.val32.lower = cmd->SG[i].Addr.lower;
1297 temp64.val32.upper = cmd->SG[i].Addr.upper;
1298 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1299 }
1300
1301 complete_buffers(rq->bio, rq->errors);
1302
1303 #ifdef CCISS_DEBUG
1304 printk("Done with %p\n", rq);
1305 #endif /* CCISS_DEBUG */
1306
1307 add_disk_randomness(rq->rq_disk);
1308 spin_lock_irqsave(&h->lock, flags);
1309 end_that_request_last(rq, rq->errors);
1310 cmd_free(h, cmd, 1);
1311 cciss_check_queues(h);
1312 spin_unlock_irqrestore(&h->lock, flags);
1313 }
1314
1315 /* This function will check the usage_count of the drive to be updated/added.
1316 * If the usage_count is zero then the drive information will be updated and
1317 * the disk will be re-registered with the kernel. If not then it will be
1318 * left alone for the next reboot. The exception to this is disk 0 which
1319 * will always be left registered with the kernel since it is also the
1320 * controller node. Any changes to disk 0 will show up on the next
1321 * reboot.
1322 */
1323 static void cciss_update_drive_info(int ctlr, int drv_index)
1324 {
1325 ctlr_info_t *h = hba[ctlr];
1326 struct gendisk *disk;
1327 InquiryData_struct *inq_buff = NULL;
1328 unsigned int block_size;
1329 sector_t total_size;
1330 unsigned long flags = 0;
1331 int ret = 0;
1332
1333 /* if the disk already exists then deregister it before proceeding */
1334 if (h->drv[drv_index].raid_level != -1) {
1335 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1336 h->drv[drv_index].busy_configuring = 1;
1337 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1338 ret = deregister_disk(h->gendisk[drv_index],
1339 &h->drv[drv_index], 0);
1340 h->drv[drv_index].busy_configuring = 0;
1341 }
1342
1343 /* If the disk is in use return */
1344 if (ret)
1345 return;
1346
1347 /* Get information about the disk and modify the driver structure */
1348 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1349 if (inq_buff == NULL)
1350 goto mem_msg;
1351
1352 cciss_read_capacity(ctlr, drv_index, 1,
1353 &total_size, &block_size);
1354
1355 /* total size = last LBA + 1 */
1356 /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
1357 /* so we assume this volume must be >2TB in size */
1358 if (total_size == (__u32) 0) {
1359 cciss_read_capacity_16(ctlr, drv_index, 1,
1360 &total_size, &block_size);
1361 h->cciss_read = CCISS_READ_16;
1362 h->cciss_write = CCISS_WRITE_16;
1363 } else {
1364 h->cciss_read = CCISS_READ_10;
1365 h->cciss_write = CCISS_WRITE_10;
1366 }
1367 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1368 inq_buff, &h->drv[drv_index]);
1369
1370 ++h->num_luns;
1371 disk = h->gendisk[drv_index];
1372 set_capacity(disk, h->drv[drv_index].nr_blocks);
1373
1374 /* if it's the controller it's already added */
1375 if (drv_index) {
1376 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1377
1378 /* Set up queue information */
1379 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1380 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1381
1382 /* This is a hardware imposed limit. */
1383 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1384
1385 /* This is a limit in the driver and could be eliminated. */
1386 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1387
1388 blk_queue_max_sectors(disk->queue, 512);
1389
1390 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1391
1392 disk->queue->queuedata = hba[ctlr];
1393
1394 blk_queue_hardsect_size(disk->queue,
1395 hba[ctlr]->drv[drv_index].block_size);
1396
1397 h->drv[drv_index].queue = disk->queue;
1398 add_disk(disk);
1399 }
1400
1401 freeret:
1402 kfree(inq_buff);
1403 return;
1404 mem_msg:
1405 printk(KERN_ERR "cciss: out of memory\n");
1406 goto freeret;
1407 }
1408
1409 /* This function will find the first index of the controllers drive array
1410 * that has a -1 for the raid_level and will return that index. This is
1411 * where new drives will be added. If the index to be returned is greater
1412 * than the highest_lun index for the controller then highest_lun is set
1413 * to this new index. If there are no available indexes then -1 is returned.
1414 */
1415 static int cciss_find_free_drive_index(int ctlr)
1416 {
1417 int i;
1418
1419 for (i = 0; i < CISS_MAX_LUN; i++) {
1420 if (hba[ctlr]->drv[i].raid_level == -1) {
1421 if (i > hba[ctlr]->highest_lun)
1422 hba[ctlr]->highest_lun = i;
1423 return i;
1424 }
1425 }
1426 return -1;
1427 }
1428
1429 /* This function will add and remove logical drives from the Logical
1430 * drive array of the controller and maintain persistence of ordering
1431 * so that mount points are preserved until the next reboot. This allows
1432 * for the removal of logical drives in the middle of the drive array
1433 * without a re-ordering of those drives.
1434 * INPUT
1435 * h = The controller to perform the operations on
1436 * del_disk = The disk to remove if specified. If the value given
1437 * is NULL then no disk is removed.
1438 */
1439 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1440 {
1441 int ctlr = h->ctlr;
1442 int num_luns;
1443 ReportLunData_struct *ld_buff = NULL;
1444 drive_info_struct *drv = NULL;
1445 int return_code;
1446 int listlength = 0;
1447 int i;
1448 int drv_found;
1449 int drv_index = 0;
1450 __u32 lunid = 0;
1451 unsigned long flags;
1452
1453 /* Set busy_configuring flag for this operation */
1454 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1455 if (h->num_luns >= CISS_MAX_LUN) {
1456 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1457 return -EINVAL;
1458 }
1459
1460 if (h->busy_configuring) {
1461 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1462 return -EBUSY;
1463 }
1464 h->busy_configuring = 1;
1465
1466 /* if del_disk is NULL then we are being called to add a new disk
1467 * and update the logical drive table. If it is not NULL then
1468 * we will check if the disk is in use or not.
1469 */
1470 if (del_disk != NULL) {
1471 drv = get_drv(del_disk);
1472 drv->busy_configuring = 1;
1473 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1474 return_code = deregister_disk(del_disk, drv, 1);
1475 drv->busy_configuring = 0;
1476 h->busy_configuring = 0;
1477 return return_code;
1478 } else {
1479 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1480 if (!capable(CAP_SYS_RAWIO))
1481 return -EPERM;
1482
1483 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1484 if (ld_buff == NULL)
1485 goto mem_msg;
1486
1487 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1488 sizeof(ReportLunData_struct), 0,
1489 0, 0, TYPE_CMD);
1490
1491 if (return_code == IO_OK) {
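/* Descriptive note (added): ld_buff->LUNListLength[] holds the list length
 * as four bytes, most-significant byte first; assemble them into a
 * host-order integer here. */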
1492 listlength |=
1493 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1494 << 24;
1495 listlength |=
1496 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1497 << 16;
1498 listlength |=
1499 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1500 << 8;
1501 listlength |=
1502 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1503 } else { /* reading number of logical volumes failed */
1504 printk(KERN_WARNING "cciss: report logical volume"
1505 " command failed\n");
1506 listlength = 0;
1507 goto freeret;
1508 }
1509
1510 num_luns = listlength / 8; /* 8 bytes per entry */
1511 if (num_luns > CISS_MAX_LUN) {
1512 num_luns = CISS_MAX_LUN;
1513 printk(KERN_WARNING "cciss: more luns configured"
1514 " on controller than can be handled by"
1515 " this driver.\n");
1516 }
1517
1518 /* Compare the controller's drive array to the driver's drive array.
1519 * Check for updates in the drive information and any new drives
1520 * on the controller.
1521 */
1522 for (i = 0; i < num_luns; i++) {
1523 int j;
1524
1525 drv_found = 0;
1526
1527 lunid = (0xff &
1528 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1529 lunid |= (0xff &
1530 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1531 lunid |= (0xff &
1532 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1533 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1534
1535 /* Find if the LUN is already in the drive array
1536 * of the controller. If so, update its info
1537 * if it is not in use. If it does not exist then find
1538 * the first free index and add it.
1539 */
1540 for (j = 0; j <= h->highest_lun; j++) {
1541 if (h->drv[j].LunID == lunid) {
1542 drv_index = j;
1543 drv_found = 1;
1544 }
1545 }
1546
1547 /* check if the drive was found already in the array */
1548 if (!drv_found) {
1549 drv_index = cciss_find_free_drive_index(ctlr);
1550 if (drv_index == -1)
1551 goto freeret;
1552
1553 }
1554 h->drv[drv_index].LunID = lunid;
1555 cciss_update_drive_info(ctlr, drv_index);
1556 } /* end for */
1557 } /* end else */
1558
1559 freeret:
1560 kfree(ld_buff);
1561 h->busy_configuring = 0;
1562 /* We return -1 here to tell the ACU that we have registered/updated
1563 * all of the drives that we can and to keep it from calling us
1564 * additional times.
1565 */
1566 return -1;
1567 mem_msg:
1568 printk(KERN_ERR "cciss: out of memory\n");
1569 goto freeret;
1570 }
1571
1572 /* This function will deregister the disk and its queue from the
1573 * kernel. It must be called with the controller lock held and the
1574 * drv structure's busy_configuring flag set. Its parameters are:
1575 *
1576 * disk = This is the disk to be deregistered
1577 * drv = This is the drive_info_struct associated with the disk to be
1578 * deregistered. It contains information about the disk used
1579 * by the driver.
1580 * clear_all = This flag determines whether or not the disk information
1581 * is going to be completely cleared out and the highest_lun
1582 * reset. Sometimes we want to clear out information about
1583 * the disk in preparation for re-adding it. In this case
1584 * the highest_lun should be left unchanged and the LunID
1585 * should not be cleared.
1586 */
1587 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1588 int clear_all)
1589 {
1590 ctlr_info_t *h = get_host(disk);
1591
1592 if (!capable(CAP_SYS_RAWIO))
1593 return -EPERM;
1594
1595 /* make sure logical volume is NOT in use */
1596 if (clear_all || (h->gendisk[0] == disk)) {
1597 if (drv->usage_count > 1)
1598 return -EBUSY;
1599 } else if (drv->usage_count > 0)
1600 return -EBUSY;
1601
1602 /* invalidate the devices and deregister the disk. If it is disk
1603 * zero do not deregister it but just zero out its values. This
1604 * allows us to delete disk zero but keep the controller registered.
1605 */
1606 if (h->gendisk[0] != disk) {
1607 if (disk) {
1608 request_queue_t *q = disk->queue;
1609 if (disk->flags & GENHD_FL_UP)
1610 del_gendisk(disk);
1611 if (q) {
1612 blk_cleanup_queue(q);
1613 drv->queue = NULL;
1614 }
1615 }
1616 }
1617
1618 --h->num_luns;
1619 /* zero out the disk size info */
1620 drv->nr_blocks = 0;
1621 drv->block_size = 0;
1622 drv->heads = 0;
1623 drv->sectors = 0;
1624 drv->cylinders = 0;
1625 drv->raid_level = -1; /* This can be used as a flag variable to
1626 * indicate that this element of the drive
1627 * array is free.
1628 */
1629
1630 if (clear_all) {
1631 /* check to see if it was the last disk */
1632 if (drv == h->drv + h->highest_lun) {
1633 /* if so, find the new highest lun */
1634 int i, newhighest = -1;
1635 for (i = 0; i < h->highest_lun; i++) {
1636 /* if the disk has size > 0, it is available */
1637 if (h->drv[i].heads)
1638 newhighest = i;
1639 }
1640 h->highest_lun = newhighest;
1641 }
1642
1643 drv->LunID = 0;
1644 }
1645 return 0;
1646 }
1647
1648 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1649 1: address logical volume log_unit,
1650 2: periph device address is scsi3addr */
1651 unsigned int log_unit, __u8 page_code,
1652 unsigned char *scsi3addr, int cmd_type)
1653 {
1654 ctlr_info_t *h = hba[ctlr];
1655 u64bit buff_dma_handle;
1656 int status = IO_OK;
1657
1658 c->cmd_type = CMD_IOCTL_PEND;
1659 c->Header.ReplyQueue = 0;
1660 if (buff != NULL) {
1661 c->Header.SGList = 1;
1662 c->Header.SGTotal = 1;
1663 } else {
1664 c->Header.SGList = 0;
1665 c->Header.SGTotal = 0;
1666 }
1667 c->Header.Tag.lower = c->busaddr;
1668
1669 c->Request.Type.Type = cmd_type;
1670 if (cmd_type == TYPE_CMD) {
1671 switch (cmd) {
1672 case CISS_INQUIRY:
1673 /* If the logical unit number is 0 then this is going
1674 to the controller, so it's a physical command:
1675 mode = 0, target = 0. So we have nothing to write.
1676 Otherwise, if use_unit_num == 1,
1677 mode = 1 (volume set addressing), target = LUNID;
1678 otherwise, if use_unit_num == 2,
1679 mode = 0 (periph dev addr), target = scsi3addr */
1680 if (use_unit_num == 1) {
1681 c->Header.LUN.LogDev.VolId =
1682 h->drv[log_unit].LunID;
1683 c->Header.LUN.LogDev.Mode = 1;
1684 } else if (use_unit_num == 2) {
1685 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1686 8);
1687 c->Header.LUN.LogDev.Mode = 0;
1688 }
1689 /* are we trying to read a vital product page */
1690 if (page_code != 0) {
1691 c->Request.CDB[1] = 0x01;
1692 c->Request.CDB[2] = page_code;
1693 }
1694 c->Request.CDBLen = 6;
1695 c->Request.Type.Attribute = ATTR_SIMPLE;
1696 c->Request.Type.Direction = XFER_READ;
1697 c->Request.Timeout = 0;
1698 c->Request.CDB[0] = CISS_INQUIRY;
1699 c->Request.CDB[4] = size & 0xFF;
1700 break;
1701 case CISS_REPORT_LOG:
1702 case CISS_REPORT_PHYS:
1703 /* Talking to the controller, so it's a physical command:
1704 mode = 0, target = 0. Nothing to write.
1705 */
1706 c->Request.CDBLen = 12;
1707 c->Request.Type.Attribute = ATTR_SIMPLE;
1708 c->Request.Type.Direction = XFER_READ;
1709 c->Request.Timeout = 0;
1710 c->Request.CDB[0] = cmd;
1711 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1712 c->Request.CDB[7] = (size >> 16) & 0xFF;
1713 c->Request.CDB[8] = (size >> 8) & 0xFF;
1714 c->Request.CDB[9] = size & 0xFF;
1715 break;
1716
1717 case CCISS_READ_CAPACITY:
1718 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1719 c->Header.LUN.LogDev.Mode = 1;
1720 c->Request.CDBLen = 10;
1721 c->Request.Type.Attribute = ATTR_SIMPLE;
1722 c->Request.Type.Direction = XFER_READ;
1723 c->Request.Timeout = 0;
1724 c->Request.CDB[0] = cmd;
1725 break;
1726 case CCISS_READ_CAPACITY_16:
1727 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1728 c->Header.LUN.LogDev.Mode = 1;
1729 c->Request.CDBLen = 16;
1730 c->Request.Type.Attribute = ATTR_SIMPLE;
1731 c->Request.Type.Direction = XFER_READ;
1732 c->Request.Timeout = 0;
1733 c->Request.CDB[0] = cmd;
1734 c->Request.CDB[1] = 0x10;
1735 c->Request.CDB[10] = (size >> 24) & 0xFF;
1736 c->Request.CDB[11] = (size >> 16) & 0xFF;
1737 c->Request.CDB[12] = (size >> 8) & 0xFF;
1738 c->Request.CDB[13] = size & 0xFF;
1739 c->Request.Timeout = 0;
1740 c->Request.CDB[0] = cmd;
1741 break;
1742 case CCISS_CACHE_FLUSH:
1743 c->Request.CDBLen = 12;
1744 c->Request.Type.Attribute = ATTR_SIMPLE;
1745 c->Request.Type.Direction = XFER_WRITE;
1746 c->Request.Timeout = 0;
1747 c->Request.CDB[0] = BMIC_WRITE;
1748 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1749 break;
1750 default:
1751 printk(KERN_WARNING
1752 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1753 return IO_ERROR;
1754 }
1755 } else if (cmd_type == TYPE_MSG) {
1756 switch (cmd) {
1757 case 0: /* ABORT message */
1758 c->Request.CDBLen = 12;
1759 c->Request.Type.Attribute = ATTR_SIMPLE;
1760 c->Request.Type.Direction = XFER_WRITE;
1761 c->Request.Timeout = 0;
1762 c->Request.CDB[0] = cmd; /* abort */
1763 c->Request.CDB[1] = 0; /* abort a command */
1764 /* buff contains the tag of the command to abort */
1765 memcpy(&c->Request.CDB[4], buff, 8);
1766 break;
1767 case 1: /* RESET message */
1768 c->Request.CDBLen = 12;
1769 c->Request.Type.Attribute = ATTR_SIMPLE;
1770 c->Request.Type.Direction = XFER_WRITE;
1771 c->Request.Timeout = 0;
1772 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1773 c->Request.CDB[0] = cmd; /* reset */
1774 c->Request.CDB[1] = 0x04; /* reset a LUN */
1775 break;
1776 case 3: /* No-Op message */
1777 c->Request.CDBLen = 1;
1778 c->Request.Type.Attribute = ATTR_SIMPLE;
1779 c->Request.Type.Direction = XFER_WRITE;
1780 c->Request.Timeout = 0;
1781 c->Request.CDB[0] = cmd;
1782 break;
1783 default:
1784 printk(KERN_WARNING
1785 "cciss%d: unknown message type %d\n", ctlr, cmd);
1786 return IO_ERROR;
1787 }
1788 } else {
1789 printk(KERN_WARNING
1790 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1791 return IO_ERROR;
1792 }
1793 /* Fill in the scatter gather information */
1794 if (size > 0) {
1795 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1796 buff, size,
1797 PCI_DMA_BIDIRECTIONAL);
1798 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1799 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1800 c->SG[0].Len = size;
1801 c->SG[0].Ext = 0; /* we are not chaining */
1802 }
1803 return status;
1804 }
1805
1806 static int sendcmd_withirq(__u8 cmd,
1807 int ctlr,
1808 void *buff,
1809 size_t size,
1810 unsigned int use_unit_num,
1811 unsigned int log_unit, __u8 page_code, int cmd_type)
1812 {
1813 ctlr_info_t *h = hba[ctlr];
1814 CommandList_struct *c;
1815 u64bit buff_dma_handle;
1816 unsigned long flags;
1817 int return_status;
1818 DECLARE_COMPLETION_ONSTACK(wait);
1819
1820 if ((c = cmd_alloc(h, 0)) == NULL)
1821 return -ENOMEM;
1822 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1823 log_unit, page_code, NULL, cmd_type);
1824 if (return_status != IO_OK) {
1825 cmd_free(h, c, 0);
1826 return return_status;
1827 }
1828 resend_cmd2:
1829 c->waiting = &wait;
1830
1831 /* Put the request on the tail of the queue and send it */
1832 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1833 addQ(&h->reqQ, c);
1834 h->Qdepth++;
1835 start_io(h);
1836 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1837
1838 wait_for_completion(&wait);
1839
1840 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1841 switch (c->err_info->CommandStatus) {
1842 case CMD_TARGET_STATUS:
1843 printk(KERN_WARNING "cciss: cmd %p has "
1844 " completed with errors\n", c);
1845 if (c->err_info->ScsiStatus) {
1846 printk(KERN_WARNING "cciss: cmd %p "
1847 "has SCSI Status = %x\n",
1848 c, c->err_info->ScsiStatus);
1849 }
1850
1851 break;
1852 case CMD_DATA_UNDERRUN:
1853 case CMD_DATA_OVERRUN:
1854 /* expected for inquiry and report LUN commands */
1855 break;
1856 case CMD_INVALID:
1857 printk(KERN_WARNING "cciss: Cmd %p is "
1858 "reported invalid\n", c);
1859 return_status = IO_ERROR;
1860 break;
1861 case CMD_PROTOCOL_ERR:
1862 printk(KERN_WARNING "cciss: cmd %p has "
1863 "protocol error \n", c);
1864 return_status = IO_ERROR;
1865 break;
1866 case CMD_HARDWARE_ERR:
1867 printk(KERN_WARNING "cciss: cmd %p had "
1868 " hardware error\n", c);
1869 return_status = IO_ERROR;
1870 break;
1871 case CMD_CONNECTION_LOST:
1872 printk(KERN_WARNING "cciss: cmd %p had "
1873 "connection lost\n", c);
1874 return_status = IO_ERROR;
1875 break;
1876 case CMD_ABORTED:
1877 printk(KERN_WARNING "cciss: cmd %p was "
1878 "aborted\n", c);
1879 return_status = IO_ERROR;
1880 break;
1881 case CMD_ABORT_FAILED:
1882 printk(KERN_WARNING "cciss: cmd %p reports "
1883 "abort failed\n", c);
1884 return_status = IO_ERROR;
1885 break;
1886 case CMD_UNSOLICITED_ABORT:
1887 printk(KERN_WARNING
1888 "cciss%d: unsolicited abort %p\n", ctlr, c);
1889 if (c->retry_count < MAX_CMD_RETRIES) {
1890 printk(KERN_WARNING
1891 "cciss%d: retrying %p\n", ctlr, c);
1892 c->retry_count++;
1893 /* erase the old error information */
1894 memset(c->err_info, 0,
1895 sizeof(ErrorInfo_struct));
1896 return_status = IO_OK;
1897 INIT_COMPLETION(wait);
1898 goto resend_cmd2;
1899 }
1900 return_status = IO_ERROR;
1901 break;
1902 default:
1903 printk(KERN_WARNING "cciss: cmd %p returned "
1904 "unknown status %x\n", c,
1905 c->err_info->CommandStatus);
1906 return_status = IO_ERROR;
1907 }
1908 }
1909 /* unlock the buffers from DMA */
1910 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1911 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1912 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1913 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1914 cmd_free(h, c, 0);
1915 return return_status;
1916 }
1917
1918 static void cciss_geometry_inquiry(int ctlr, int logvol,
1919 int withirq, sector_t total_size,
1920 unsigned int block_size,
1921 InquiryData_struct *inq_buff,
1922 drive_info_struct *drv)
1923 {
1924 int return_code;
1925 unsigned long t;
1926
1927 memset(inq_buff, 0, sizeof(InquiryData_struct));
1928 if (withirq)
1929 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1930 inq_buff, sizeof(*inq_buff), 1,
1931 logvol, 0xC1, TYPE_CMD);
1932 else
1933 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1934 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1935 TYPE_CMD);
1936 if (return_code == IO_OK) {
1937 if (inq_buff->data_byte[8] == 0xFF) {
1938 printk(KERN_WARNING
1939 "cciss: reading geometry failed, volume "
1940 "does not support reading geometry\n");
1941 drv->heads = 255;
1942 drv->sectors = 32; // Sectors per track
1943 } else {
1944 drv->heads = inq_buff->data_byte[6];
1945 drv->sectors = inq_buff->data_byte[7];
1946 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1947 drv->cylinders += inq_buff->data_byte[5];
1948 drv->raid_level = inq_buff->data_byte[8];
1949 }
1950 drv->block_size = block_size;
1951 drv->nr_blocks = total_size;
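/*
 * Derive a cylinder count from the capacity: cylinders =
 * total_size / (heads * sectors), rounded up.  For example, with
 * 255 heads and 32 sectors per track that is total_size / 8160.
 * Note that sector_div() divides its first argument in place and
 * returns the remainder.
 */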
1952 t = drv->heads * drv->sectors;
1953 if (t > 1) {
1954 unsigned rem = sector_div(total_size, t);
1955 if (rem)
1956 total_size++;
1957 drv->cylinders = total_size;
1958 }
1959 } else { /* Get geometry failed */
1960 printk(KERN_WARNING "cciss: reading geometry failed\n");
1961 }
1962 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1963 drv->heads, drv->sectors, drv->cylinders);
1964 }
1965
1966 static void
1967 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1968 unsigned int *block_size)
1969 {
1970 ReadCapdata_struct *buf;
1971 int return_code;
1972 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1973 if (buf == NULL) {
1974 printk(KERN_WARNING "cciss: out of memory\n");
1975 return;
1976 }
1977 memset(buf, 0, sizeof(ReadCapdata_struct));
1978 if (withirq)
1979 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1980 ctlr, buf, sizeof(ReadCapdata_struct),
1981 1, logvol, 0, TYPE_CMD);
1982 else
1983 return_code = sendcmd(CCISS_READ_CAPACITY,
1984 ctlr, buf, sizeof(ReadCapdata_struct),
1985 1, logvol, 0, NULL, TYPE_CMD);
1986 if (return_code == IO_OK) {
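/* READ CAPACITY returns the last logical block address and the
 * block size as big-endian 32-bit values; adding 1 to the last
 * LBA gives the total number of blocks. */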
1987 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
1988 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1989 } else { /* read capacity command failed */
1990 printk(KERN_WARNING "cciss: read capacity failed\n");
1991 *total_size = 0;
1992 *block_size = BLOCK_SIZE;
1993 }
1994 if (*total_size != (__u32) 0)
1995 printk(KERN_INFO " blocks= %lld block_size= %d\n",
1996 *total_size, *block_size);
1997 kfree(buf);
1998 return;
1999 }
2000
2001 static void
2002 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2003 {
2004 ReadCapdata_struct_16 *buf;
2005 int return_code;
2006 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2007 if (buf == NULL) {
2008 printk(KERN_WARNING "cciss: out of memory\n");
2009 return;
2010 }
2011 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2012 if (withirq) {
2013 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2014 ctlr, buf, sizeof(ReadCapdata_struct_16),
2015 1, logvol, 0, TYPE_CMD);
2016 }
2017 else {
2018 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2019 ctlr, buf, sizeof(ReadCapdata_struct_16),
2020 1, logvol, 0, NULL, TYPE_CMD);
2021 }
2022 if (return_code == IO_OK) {
2023 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
2024 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
2025 } else { /* read capacity command failed */
2026 printk(KERN_WARNING "cciss: read capacity failed\n");
2027 *total_size = 0;
2028 *block_size = BLOCK_SIZE;
2029 }
2030 printk(KERN_INFO " blocks= %lld block_size= %d\n",
2031 *total_size, *block_size);
2032 kfree(buf);
2033 return;
2034 }
2035
2036 static int cciss_revalidate(struct gendisk *disk)
2037 {
2038 ctlr_info_t *h = get_host(disk);
2039 drive_info_struct *drv = get_drv(disk);
2040 int logvol;
2041 int FOUND = 0;
2042 unsigned int block_size;
2043 sector_t total_size;
2044 InquiryData_struct *inq_buff = NULL;
2045
2046 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2047 if (h->drv[logvol].LunID == drv->LunID) {
2048 FOUND = 1;
2049 break;
2050 }
2051 }
2052
2053 if (!FOUND)
2054 return 1;
2055
2056 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2057 if (inq_buff == NULL) {
2058 printk(KERN_WARNING "cciss: out of memory\n");
2059 return 1;
2060 }
2061 if (h->cciss_read == CCISS_READ_10) {
2062 cciss_read_capacity(h->ctlr, logvol, 1,
2063 &total_size, &block_size);
2064 } else {
2065 cciss_read_capacity_16(h->ctlr, logvol, 1,
2066 &total_size, &block_size);
2067 }
2068 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2069 inq_buff, drv);
2070
2071 blk_queue_hardsect_size(drv->queue, drv->block_size);
2072 set_capacity(disk, drv->nr_blocks);
2073
2074 kfree(inq_buff);
2075 return 0;
2076 }
2077
2078 /*
2079 * Wait polling for a command to complete.
2080 * The memory mapped FIFO is polled for the completion.
2081 * Used only at init time; interrupts from the HBA are disabled.
2082 */
2083 static unsigned long pollcomplete(int ctlr)
2084 {
2085 unsigned long done;
2086 int i;
2087
2088 /* Wait (up to 20 seconds) for a command to complete */
2089
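/* Each pass sleeps for one jiffy when the FIFO is empty, so
 * 20 * HZ iterations bounds the wait at roughly 20 seconds. */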
2090 for (i = 20 * HZ; i > 0; i--) {
2091 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2092 if (done == FIFO_EMPTY)
2093 schedule_timeout_uninterruptible(1);
2094 else
2095 return done;
2096 }
2097 /* Invalid address to tell caller we ran out of time */
2098 return 1;
2099 }
2100
2101 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2102 {
2103 /* We get in here if sendcmd() is polling for completions
2104 and gets some command back that it wasn't expecting --
2105 something other than that which it just sent down.
2106 Ordinarily, that shouldn't happen, but it can happen when
2107 the scsi tape stuff gets into error handling mode, and
2108 starts using sendcmd() to try to abort commands and
2109 reset tape drives. In that case, sendcmd may pick up
2110 completions of commands that were sent to logical drives
2111 through the block i/o system, or cciss ioctls completing, etc.
2112 When that happens, we need to save those completions for later
2113 processing by the interrupt handler.
2114 */
2115
2116 #ifdef CONFIG_CISS_SCSI_TAPE
2117 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2118
2119 /* If it's not the scsi tape stuff doing error handling, (abort */
2120 /* or reset) then we don't expect anything weird. */
2121 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2122 #endif
2123 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2124 "Invalid command list address returned! (%lx)\n",
2125 ctlr, complete);
2126 /* not much we can do. */
2127 #ifdef CONFIG_CISS_SCSI_TAPE
2128 return 1;
2129 }
2130
2131 /* We've sent down an abort or reset, but something else
2132 has completed */
2133 if (srl->ncompletions >= (NR_CMDS + 2)) {
2134 /* Uh oh. No room to save it for later... */
2135 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2136 "reject list overflow, command lost!\n", ctlr);
2137 return 1;
2138 }
2139 /* Save it for later */
2140 srl->complete[srl->ncompletions] = complete;
2141 srl->ncompletions++;
2142 #endif
2143 return 0;
2144 }
2145
2146 /*
2147 * Send a command to the controller, and wait for it to complete.
2148 * Only used at init time.
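 * The flow is: fill in the command, disable controller interrupts, wait
 * for room in the command FIFO, submit the command, and then poll the
 * completion FIFO.  Completions that do not match the command we just
 * sent are handed to add_sendcmd_reject() for later processing.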
2149 */
2150 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2151 1: address logical volume log_unit,
2152 2: periph device address is scsi3addr */
2153 unsigned int log_unit,
2154 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2155 {
2156 CommandList_struct *c;
2157 int i;
2158 unsigned long complete;
2159 ctlr_info_t *info_p = hba[ctlr];
2160 u64bit buff_dma_handle;
2161 int status, done = 0;
2162
2163 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2164 printk(KERN_WARNING "cciss: unable to get memory");
2165 return IO_ERROR;
2166 }
2167 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2168 log_unit, page_code, scsi3addr, cmd_type);
2169 if (status != IO_OK) {
2170 cmd_free(info_p, c, 1);
2171 return status;
2172 }
2173 resend_cmd1:
2174 /*
2175 * Disable interrupt
2176 */
2177 #ifdef CCISS_DEBUG
2178 printk(KERN_DEBUG "cciss: turning intr off\n");
2179 #endif /* CCISS_DEBUG */
2180 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2181
2182 /* Make sure there is room in the command FIFO */
2183 /* Actually it should be completely empty at this time */
2184 /* unless we are in here doing error handling for the scsi */
2185 /* tape side of the driver. */
2186 for (i = 200000; i > 0; i--) {
2187 /* if fifo isn't full go */
2188 if (!(info_p->access.fifo_full(info_p))) {
2189
2190 break;
2191 }
2192 udelay(10);
2193 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2194 " waiting!\n", ctlr);
2195 }
2196 /*
2197 * Send the cmd
2198 */
2199 info_p->access.submit_command(info_p, c);
2200 done = 0;
2201 do {
2202 complete = pollcomplete(ctlr);
2203
2204 #ifdef CCISS_DEBUG
2205 printk(KERN_DEBUG "cciss: command completed\n");
2206 #endif /* CCISS_DEBUG */
2207
2208 if (complete == 1) {
2209 printk(KERN_WARNING
2210 "cciss cciss%d: SendCmd Timeout out, "
2211 "No command list address returned!\n", ctlr);
2212 status = IO_ERROR;
2213 done = 1;
2214 break;
2215 }
2216
2217 /* This will need to change for direct lookup completions */
2218 if ((complete & CISS_ERROR_BIT)
2219 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2220 /* if data overrun or underrun on a report or inquiry command,
2221 ignore it
2222 */
2223 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2224 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2225 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2226 ((c->err_info->CommandStatus ==
2227 CMD_DATA_OVERRUN) ||
2228 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2229 )) {
2230 complete = c->busaddr;
2231 } else {
2232 if (c->err_info->CommandStatus ==
2233 CMD_UNSOLICITED_ABORT) {
2234 printk(KERN_WARNING "cciss%d: "
2235 "unsolicited abort %p\n",
2236 ctlr, c);
2237 if (c->retry_count < MAX_CMD_RETRIES) {
2238 printk(KERN_WARNING
2239 "cciss%d: retrying %p\n",
2240 ctlr, c);
2241 c->retry_count++;
2242 /* erase the old error */
2243 /* information */
2244 memset(c->err_info, 0,
2245 sizeof
2246 (ErrorInfo_struct));
2247 goto resend_cmd1;
2248 } else {
2249 printk(KERN_WARNING
2250 "cciss%d: retried %p too "
2251 "many times\n", ctlr, c);
2252 status = IO_ERROR;
2253 goto cleanup1;
2254 }
2255 } else if (c->err_info->CommandStatus ==
2256 CMD_UNABORTABLE) {
2257 printk(KERN_WARNING
2258 "cciss%d: command could not be aborted.\n",
2259 ctlr);
2260 status = IO_ERROR;
2261 goto cleanup1;
2262 }
2263 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2264 " Error %x \n", ctlr,
2265 c->err_info->CommandStatus);
2266 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2267 " offensive info\n"
2268 " size %x\n num %x value %x\n",
2269 ctlr,
2270 c->err_info->MoreErrInfo.Invalid_Cmd.
2271 offense_size,
2272 c->err_info->MoreErrInfo.Invalid_Cmd.
2273 offense_num,
2274 c->err_info->MoreErrInfo.Invalid_Cmd.
2275 offense_value);
2276 status = IO_ERROR;
2277 goto cleanup1;
2278 }
2279 }
2280 /* This will need changing for direct lookup completions */
2281 if (complete != c->busaddr) {
2282 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2283 BUG(); /* we are pretty much hosed if we get here. */
2284 }
2285 continue;
2286 } else
2287 done = 1;
2288 } while (!done);
2289
2290 cleanup1:
2291 /* unlock the data buffer from DMA */
2292 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2293 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2294 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2295 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2296 #ifdef CONFIG_CISS_SCSI_TAPE
2297 /* if we saved some commands for later, process them now. */
2298 if (info_p->scsi_rejects.ncompletions > 0)
2299 do_cciss_intr(0, info_p);
2300 #endif
2301 cmd_free(info_p, c, 1);
2302 return status;
2303 }
2304
2305 /*
2306 * Map (physical) PCI mem into (virtual) kernel space
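 * The base address need not be page aligned: whole pages are mapped with
 * ioremap() and the returned pointer is offset back to the requested
 * address within the mapping.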
2307 */
2308 static void __iomem *remap_pci_mem(ulong base, ulong size)
2309 {
2310 ulong page_base = ((ulong) base) & PAGE_MASK;
2311 ulong page_offs = ((ulong) base) - page_base;
2312 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2313
2314 return page_remapped ? (page_remapped + page_offs) : NULL;
2315 }
2316
2317 /*
2318 * Takes jobs off the request queue and sends them to the hardware,
2319 * then puts them on the completion queue to wait for completion.
2320 */
2321 static void start_io(ctlr_info_t *h)
2322 {
2323 CommandList_struct *c;
2324
2325 while ((c = h->reqQ) != NULL) {
2326 /* can't do anything if fifo is full */
2327 if ((h->access.fifo_full(h))) {
2328 printk(KERN_WARNING "cciss: fifo full\n");
2329 break;
2330 }
2331
2332 /* Get the first entry from the Request Q */
2333 removeQ(&(h->reqQ), c);
2334 h->Qdepth--;
2335
2336 /* Tell the controller to execute the command */
2337 h->access.submit_command(h, c);
2338
2339 /* Put job onto the completed Q */
2340 addQ(&(h->cmpQ), c);
2341 }
2342 }
2343
2344 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2345 /* Zeros out the error record and then resends the command back */
2346 /* to the controller */
2347 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2348 {
2349 /* erase the old error information */
2350 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2351
2352 /* add it to software queue and then send it to the controller */
2353 addQ(&(h->reqQ), c);
2354 h->Qdepth++;
2355 if (h->Qdepth > h->maxQsinceinit)
2356 h->maxQsinceinit = h->Qdepth;
2357
2358 start_io(h);
2359 }
2360
2361 /* Checks the status of the completed command and finishes the block
2362 * layer request accordingly. Note that this function does not need
2363 * to hold the hba/queue lock.
2364 */
2365 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2366 int timeout)
2367 {
2368 int status = 1;
2369 int retry_cmd = 0;
2370
2371 if (timeout)
2372 status = 0;
2373
2374 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2375 switch (cmd->err_info->CommandStatus) {
2376 unsigned char sense_key;
2377 case CMD_TARGET_STATUS:
2378 status = 0;
2379
2380 if (cmd->err_info->ScsiStatus == 0x02) {
2381 printk(KERN_WARNING "cciss: cmd %p "
2382 "has CHECK CONDITION "
2383 " byte 2 = 0x%x\n", cmd,
2384 cmd->err_info->SenseInfo[2]
2385 );
2386 /* check the sense key */
2387 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2388 /* no status or recovered error */
2389 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2390 status = 1;
2391 }
2392 } else {
2393 printk(KERN_WARNING "cciss: cmd %p "
2394 "has SCSI Status 0x%x\n",
2395 cmd, cmd->err_info->ScsiStatus);
2396 }
2397 break;
2398 case CMD_DATA_UNDERRUN:
2399 printk(KERN_WARNING "cciss: cmd %p has"
2400 " completed with data underrun "
2401 "reported\n", cmd);
2402 break;
2403 case CMD_DATA_OVERRUN:
2404 printk(KERN_WARNING "cciss: cmd %p has"
2405 " completed with data overrun "
2406 "reported\n", cmd);
2407 break;
2408 case CMD_INVALID:
2409 printk(KERN_WARNING "cciss: cmd %p is "
2410 "reported invalid\n", cmd);
2411 status = 0;
2412 break;
2413 case CMD_PROTOCOL_ERR:
2414 printk(KERN_WARNING "cciss: cmd %p has "
2415 "protocol error \n", cmd);
2416 status = 0;
2417 break;
2418 case CMD_HARDWARE_ERR:
2419 printk(KERN_WARNING "cciss: cmd %p had "
2420 " hardware error\n", cmd);
2421 status = 0;
2422 break;
2423 case CMD_CONNECTION_LOST:
2424 printk(KERN_WARNING "cciss: cmd %p had "
2425 "connection lost\n", cmd);
2426 status = 0;
2427 break;
2428 case CMD_ABORTED:
2429 printk(KERN_WARNING "cciss: cmd %p was "
2430 "aborted\n", cmd);
2431 status = 0;
2432 break;
2433 case CMD_ABORT_FAILED:
2434 printk(KERN_WARNING "cciss: cmd %p reports "
2435 "abort failed\n", cmd);
2436 status = 0;
2437 break;
2438 case CMD_UNSOLICITED_ABORT:
2439 printk(KERN_WARNING "cciss%d: unsolicited "
2440 "abort %p\n", h->ctlr, cmd);
2441 if (cmd->retry_count < MAX_CMD_RETRIES) {
2442 retry_cmd = 1;
2443 printk(KERN_WARNING
2444 "cciss%d: retrying %p\n", h->ctlr, cmd);
2445 cmd->retry_count++;
2446 } else
2447 printk(KERN_WARNING
2448 "cciss%d: %p retried too "
2449 "many times\n", h->ctlr, cmd);
2450 status = 0;
2451 break;
2452 case CMD_TIMEOUT:
2453 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2454 status = 0;
2455 break;
2456 default:
2457 printk(KERN_WARNING "cciss: cmd %p returned "
2458 "unknown status %x\n", cmd,
2459 cmd->err_info->CommandStatus);
2460 status = 0;
2461 }
2462 }
2463 /* Resend the command if it needs a retry; otherwise finish the request */
2464 if (retry_cmd) {
2465 resend_cciss_cmd(h, cmd);
2466 return;
2467 }
2468
2469 cmd->rq->completion_data = cmd;
2470 cmd->rq->errors = status;
2471 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2472 blk_complete_request(cmd->rq);
2473 }
2474
2475 /*
2476 * Get a request and submit it to the controller.
2477 */
2478 static void do_cciss_request(request_queue_t *q)
2479 {
2480 ctlr_info_t *h = q->queuedata;
2481 CommandList_struct *c;
2482 sector_t start_blk;
2483 int seg;
2484 struct request *creq;
2485 u64bit temp64;
2486 struct scatterlist tmp_sg[MAXSGENTRIES];
2487 drive_info_struct *drv;
2488 int i, dir;
2489
2490 /* We call start_io here in case there is a command waiting on the
2491 * queue that has not been sent.
2492 */
2493 if (blk_queue_plugged(q))
2494 goto startio;
2495
2496 queue:
2497 creq = elv_next_request(q);
2498 if (!creq)
2499 goto startio;
2500
2501 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2502
2503 if ((c = cmd_alloc(h, 1)) == NULL)
2504 goto full;
2505
2506 blkdev_dequeue_request(creq);
2507
2508 spin_unlock_irq(q->queue_lock);
2509
2510 c->cmd_type = CMD_RWREQ;
2511 c->rq = creq;
2512
2513 /* fill in the request */
2514 drv = creq->rq_disk->private_data;
2515 c->Header.ReplyQueue = 0; // unused in simple mode
2516 /* got command from pool, so use the command block index instead */
2517 /* for direct lookups. */
2518 /* The first 2 bits are reserved for controller error reporting. */
2519 c->Header.Tag.lower = (c->cmdindex << 3);
2520 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
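/* Resulting tag layout: bits 0-1 are left clear for the controller's
 * error flags, bit 2 marks a direct-lookup tag, and bits 3 and up hold
 * the command pool index (e.g. index 5 yields a tag of 0x2c). */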
2521 c->Header.LUN.LogDev.VolId = drv->LunID;
2522 c->Header.LUN.LogDev.Mode = 1;
2523 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2524 c->Request.Type.Type = TYPE_CMD; // It is a command.
2525 c->Request.Type.Attribute = ATTR_SIMPLE;
2526 c->Request.Type.Direction =
2527 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2528 c->Request.Timeout = 0; // Don't time out
2529 c->Request.CDB[0] =
2530 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2531 start_blk = creq->sector;
2532 #ifdef CCISS_DEBUG
2533 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2534 (int)creq->nr_sectors);
2535 #endif /* CCISS_DEBUG */
2536
2537 seg = blk_rq_map_sg(q, creq, tmp_sg);
2538
2539 /* get the DMA records for the setup */
2540 if (c->Request.Type.Direction == XFER_READ)
2541 dir = PCI_DMA_FROMDEVICE;
2542 else
2543 dir = PCI_DMA_TODEVICE;
2544
2545 for (i = 0; i < seg; i++) {
2546 c->SG[i].Len = tmp_sg[i].length;
2547 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2548 tmp_sg[i].offset,
2549 tmp_sg[i].length, dir);
2550 c->SG[i].Addr.lower = temp64.val32.lower;
2551 c->SG[i].Addr.upper = temp64.val32.upper;
2552 c->SG[i].Ext = 0; // we are not chaining
2553 }
2554 /* track how many SG entries we are using */
2555 if (seg > h->maxSG)
2556 h->maxSG = seg;
2557
2558 #ifdef CCISS_DEBUG
2559 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2560 creq->nr_sectors, seg);
2561 #endif /* CCISS_DEBUG */
2562
2563 c->Header.SGList = c->Header.SGTotal = seg;
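/* Build the READ/WRITE CDB.  A 10-byte CDB carries a 32-bit LBA in
 * bytes 2-5 and a 16-bit sector count in bytes 7-8; a 16-byte CDB
 * carries a 64-bit LBA in bytes 2-9 and a 32-bit count in bytes 10-13. */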
2564 if (h->cciss_read == CCISS_READ_10) {
2565 c->Request.CDB[1] = 0;
2566 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2567 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2568 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2569 c->Request.CDB[5] = start_blk & 0xff;
2570 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2571 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2572 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2573 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2574 } else {
2575 c->Request.CDBLen = 16;
2576 c->Request.CDB[1] = 0;
2577 c->Request.CDB[2] = (start_blk >> 56) & 0xff; //MSB
2578 c->Request.CDB[3] = (start_blk >> 48) & 0xff;
2579 c->Request.CDB[4] = (start_blk >> 40) & 0xff;
2580 c->Request.CDB[5] = (start_blk >> 32) & 0xff;
2581 c->Request.CDB[6] = (start_blk >> 24) & 0xff;
2582 c->Request.CDB[7] = (start_blk >> 16) & 0xff;
2583 c->Request.CDB[8] = (start_blk >> 8) & 0xff;
2584 c->Request.CDB[9] = start_blk & 0xff;
2585 c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
2586 c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
2587 c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
2588 c->Request.CDB[13] = creq->nr_sectors & 0xff;
2589 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2590 }
2591
2592 spin_lock_irq(q->queue_lock);
2593
2594 addQ(&(h->reqQ), c);
2595 h->Qdepth++;
2596 if (h->Qdepth > h->maxQsinceinit)
2597 h->maxQsinceinit = h->Qdepth;
2598
2599 goto queue;
2600 full:
2601 blk_stop_queue(q);
2602 startio:
2603 /* We will already have the driver lock here, so we do not need
2604 * to take it again.
2605 */
2606 start_io(h);
2607 }
2608
2609 static inline unsigned long get_next_completion(ctlr_info_t *h)
2610 {
2611 #ifdef CONFIG_CISS_SCSI_TAPE
2612 /* Any rejects from sendcmd() lying around? Process them first */
2613 if (h->scsi_rejects.ncompletions == 0)
2614 return h->access.command_completed(h);
2615 else {
2616 struct sendcmd_reject_list *srl;
2617 int n;
2618 srl = &h->scsi_rejects;
2619 n = --srl->ncompletions;
2620 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2621 printk("p");
2622 return srl->complete[n];
2623 }
2624 #else
2625 return h->access.command_completed(h);
2626 #endif
2627 }
2628
2629 static inline int interrupt_pending(ctlr_info_t *h)
2630 {
2631 #ifdef CONFIG_CISS_SCSI_TAPE
2632 return (h->access.intr_pending(h)
2633 || (h->scsi_rejects.ncompletions > 0));
2634 #else
2635 return h->access.intr_pending(h);
2636 #endif
2637 }
2638
2639 static inline long interrupt_not_for_us(ctlr_info_t *h)
2640 {
2641 #ifdef CONFIG_CISS_SCSI_TAPE
2642 return (((h->access.intr_pending(h) == 0) ||
2643 (h->interrupts_enabled == 0))
2644 && (h->scsi_rejects.ncompletions == 0));
2645 #else
2646 return (((h->access.intr_pending(h) == 0) ||
2647 (h->interrupts_enabled == 0)));
2648 #endif
2649 }
2650
2651 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2652 {
2653 ctlr_info_t *h = dev_id;
2654 CommandList_struct *c;
2655 unsigned long flags;
2656 __u32 a, a1, a2;
2657
2658 if (interrupt_not_for_us(h))
2659 return IRQ_NONE;
2660 /*
2661 * If there are completed commands in the completion queue,
2662 * we had better do something about it.
2663 */
2664 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2665 while (interrupt_pending(h)) {
2666 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2667 a1 = a;
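/* If bit 2 of the completion tag is set, this is a direct-lookup
 * tag: bits 3 and up index straight into the command pool.
 * Otherwise mask off the low bits and search the completion queue
 * for a command with a matching bus address. */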
2668 if ((a & 0x04)) {
2669 a2 = (a >> 3);
2670 if (a2 >= NR_CMDS) {
2671 printk(KERN_WARNING
2672 "cciss: controller cciss%d failed, stopping.\n",
2673 h->ctlr);
2674 fail_all_cmds(h->ctlr);
2675 return IRQ_HANDLED;
2676 }
2677
2678 c = h->cmd_pool + a2;
2679 a = c->busaddr;
2680
2681 } else {
2682 a &= ~3;
2683 if ((c = h->cmpQ) == NULL) {
2684 printk(KERN_WARNING
2685 "cciss: Completion of %08x ignored\n",
2686 a1);
2687 continue;
2688 }
2689 while (c->busaddr != a) {
2690 c = c->next;
2691 if (c == h->cmpQ)
2692 break;
2693 }
2694 }
2695 /*
2696 * If we've found the command, take it off the
2697 * completion Q and free it
2698 */
2699 if (c->busaddr == a) {
2700 removeQ(&h->cmpQ, c);
2701 if (c->cmd_type == CMD_RWREQ) {
2702 complete_command(h, c, 0);
2703 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2704 complete(c->waiting);
2705 }
2706 # ifdef CONFIG_CISS_SCSI_TAPE
2707 else if (c->cmd_type == CMD_SCSI)
2708 complete_scsi_command(c, 0, a1);
2709 # endif
2710 continue;
2711 }
2712 }
2713 }
2714
2715 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2716 return IRQ_HANDLED;
2717 }
2718
2719 /*
2720 * We cannot read the structure directly; for portability we must use
2721 * the io functions.
2722 * This is for debug only.
2723 */
2724 #ifdef CCISS_DEBUG
2725 static void print_cfg_table(CfgTable_struct *tb)
2726 {
2727 int i;
2728 char temp_name[17];
2729
2730 printk("Controller Configuration information\n");
2731 printk("------------------------------------\n");
2732 for (i = 0; i < 4; i++)
2733 temp_name[i] = readb(&(tb->Signature[i]));
2734 temp_name[4] = '\0';
2735 printk(" Signature = %s\n", temp_name);
2736 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2737 printk(" Transport methods supported = 0x%x\n",
2738 readl(&(tb->TransportSupport)));
2739 printk(" Transport methods active = 0x%x\n",
2740 readl(&(tb->TransportActive)));
2741 printk(" Requested transport Method = 0x%x\n",
2742 readl(&(tb->HostWrite.TransportRequest)));
2743 printk(" Coalesce Interrupt Delay = 0x%x\n",
2744 readl(&(tb->HostWrite.CoalIntDelay)));
2745 printk(" Coalesce Interrupt Count = 0x%x\n",
2746 readl(&(tb->HostWrite.CoalIntCount)));
2747 printk(" Max outstanding commands = 0x%d\n",
2748 readl(&(tb->CmdsOutMax)));
2749 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2750 for (i = 0; i < 16; i++)
2751 temp_name[i] = readb(&(tb->ServerName[i]));
2752 temp_name[16] = '\0';
2753 printk(" Server Name = %s\n", temp_name);
2754 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2755 }
2756 #endif /* CCISS_DEBUG */
2757
2758 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2759 {
2760 int i, offset, mem_type, bar_type;
2761 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2762 return 0;
2763 offset = 0;
2764 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2765 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2766 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2767 offset += 4;
2768 else {
2769 mem_type = pci_resource_flags(pdev, i) &
2770 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2771 switch (mem_type) {
2772 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2773 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2774 offset += 4; /* 32 bit */
2775 break;
2776 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2777 offset += 8;
2778 break;
2779 default: /* reserved in PCI 2.2 */
2780 printk(KERN_WARNING
2781 "Base address is invalid\n");
2782 return -1;
2783 break;
2784 }
2785 }
2786 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2787 return i + 1;
2788 }
2789 return -1;
2790 }
2791
2792 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2793 * controllers that are capable. If not, we use IO-APIC mode.
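 * MSI-X is tried first (four vectors are requested); if that fails we
 * fall back to plain MSI, and finally to the legacy pin interrupt in
 * pdev->irq.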
2794 */
2795
2796 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2797 struct pci_dev *pdev, __u32 board_id)
2798 {
2799 #ifdef CONFIG_PCI_MSI
2800 int err;
2801 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2802 {0, 2}, {0, 3}
2803 };
2804
2805 /* Some boards advertise MSI but don't really support it */
2806 if ((board_id == 0x40700E11) ||
2807 (board_id == 0x40800E11) ||
2808 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2809 goto default_int_mode;
2810
2811 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2812 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2813 if (!err) {
2814 c->intr[0] = cciss_msix_entries[0].vector;
2815 c->intr[1] = cciss_msix_entries[1].vector;
2816 c->intr[2] = cciss_msix_entries[2].vector;
2817 c->intr[3] = cciss_msix_entries[3].vector;
2818 c->msix_vector = 1;
2819 return;
2820 }
2821 if (err > 0) {
2822 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2823 "available\n", err);
2824 } else {
2825 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2826 err);
2827 }
2828 }
2829 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2830 if (!pci_enable_msi(pdev)) {
2831 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2832 c->msi_vector = 1;
2833 return;
2834 } else {
2835 printk(KERN_WARNING "cciss: MSI init failed\n");
2836 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2837 return;
2838 }
2839 }
2840 default_int_mode:
2841 #endif /* CONFIG_PCI_MSI */
2842 /* if we get here we're going to use the default interrupt mode */
2843 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2844 return;
2845 }
2846
2847 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2848 {
2849 ushort subsystem_vendor_id, subsystem_device_id, command;
2850 __u32 board_id, scratchpad = 0;
2851 __u64 cfg_offset;
2852 __u32 cfg_base_addr;
2853 __u64 cfg_base_addr_index;
2854 int i, err;
2855
2856 /* check to see if controller has been disabled */
2857 /* BEFORE trying to enable it */
2858 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2859 if (!(command & 0x02)) {
2860 printk(KERN_WARNING
2861 "cciss: controller appears to be disabled\n");
2862 return -ENODEV;
2863 }
2864
2865 err = pci_enable_device(pdev);
2866 if (err) {
2867 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2868 return err;
2869 }
2870
2871 err = pci_request_regions(pdev, "cciss");
2872 if (err) {
2873 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2874 "aborting\n");
2875 goto err_out_disable_pdev;
2876 }
2877
2878 subsystem_vendor_id = pdev->subsystem_vendor;
2879 subsystem_device_id = pdev->subsystem_device;
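/* The board id used for the products[] lookup below is the PCI
 * subsystem device id in the upper 16 bits and the subsystem
 * vendor id in the lower 16 bits. */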
2880 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2881 subsystem_vendor_id);
2882
2883 #ifdef CCISS_DEBUG
2884 printk("command = %x\n", command);
2885 printk("irq = %x\n", pdev->irq);
2886 printk("board_id = %x\n", board_id);
2887 #endif /* CCISS_DEBUG */
2888
2889 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2890 * else we use the IO-APIC interrupt assigned to us by system ROM.
2891 */
2892 cciss_interrupt_mode(c, pdev, board_id);
2893
2894 /*
2895 * Memory base addr is the first addr; the second points to the config
2896 * table
2897 */
2898
2899 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2900 #ifdef CCISS_DEBUG
2901 printk("address 0 = %x\n", c->paddr);
2902 #endif /* CCISS_DEBUG */
2903 c->vaddr = remap_pci_mem(c->paddr, 200);
2904
2905 /* Wait for the board to become ready. (PCI hotplug needs this.)
2906 * We poll for up to 120 secs, once per 100ms. */
2907 for (i = 0; i < 1200; i++) {
2908 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2909 if (scratchpad == CCISS_FIRMWARE_READY)
2910 break;
2911 set_current_state(TASK_INTERRUPTIBLE);
2912 schedule_timeout(HZ / 10); /* wait 100ms */
2913 }
2914 if (scratchpad != CCISS_FIRMWARE_READY) {
2915 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2916 err = -ENODEV;
2917 goto err_out_free_res;
2918 }
2919
2920 /* get the address index number */
2921 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2922 cfg_base_addr &= (__u32) 0x0000ffff;
2923 #ifdef CCISS_DEBUG
2924 printk("cfg base address = %x\n", cfg_base_addr);
2925 #endif /* CCISS_DEBUG */
2926 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2927 #ifdef CCISS_DEBUG
2928 printk("cfg base address index = %x\n", cfg_base_addr_index);
2929 #endif /* CCISS_DEBUG */
2930 if (cfg_base_addr_index == -1) {
2931 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2932 err = -ENODEV;
2933 goto err_out_free_res;
2934 }
2935
2936 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2937 #ifdef CCISS_DEBUG
2938 printk("cfg offset = %x\n", cfg_offset);
2939 #endif /* CCISS_DEBUG */
2940 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2941 cfg_base_addr_index) +
2942 cfg_offset, sizeof(CfgTable_struct));
2943 c->board_id = board_id;
2944
2945 #ifdef CCISS_DEBUG
2946 print_cfg_table(c->cfgtable);
2947 #endif /* CCISS_DEBUG */
2948
2949 for (i = 0; i < ARRAY_SIZE(products); i++) {
2950 if (board_id == products[i].board_id) {
2951 c->product_name = products[i].product_name;
2952 c->access = *(products[i].access);
2953 break;
2954 }
2955 }
2956 if (i == ARRAY_SIZE(products)) {
2957 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2958 " to access the Smart Array controller %08lx\n",
2959 (unsigned long)board_id);
2960 err = -ENODEV;
2961 goto err_out_free_res;
2962 }
2963 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2964 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2965 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2966 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2967 printk("Does not appear to be a valid CISS config table\n");
2968 err = -ENODEV;
2969 goto err_out_free_res;
2970 }
2971 #ifdef CONFIG_X86
2972 {
2973 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2974 __u32 prefetch;
2975 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2976 prefetch |= 0x100;
2977 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2978 }
2979 #endif
2980
2981 #ifdef CCISS_DEBUG
2982 printk("Trying to put board into Simple mode\n");
2983 #endif /* CCISS_DEBUG */
2984 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2985 /* Update the field, and then ring the doorbell */
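/* The loop below then polls the doorbell register, waiting for the
 * firmware to clear CFGTBL_ChangeReq to signal that the transport
 * change has been processed. */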
2986 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2987 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2988
2989 /* under certain very rare conditions, this can take a while.
2990 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2991 * as we enter this code.) */
2992 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
2993 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2994 break;
2995 /* delay and try again */
2996 set_current_state(TASK_INTERRUPTIBLE);
2997 schedule_timeout(10);
2998 }
2999
3000 #ifdef CCISS_DEBUG
3001 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3002 readl(c->vaddr + SA5_DOORBELL));
3003 #endif /* CCISS_DEBUG */
3004 #ifdef CCISS_DEBUG
3005 print_cfg_table(c->cfgtable);
3006 #endif /* CCISS_DEBUG */
3007
3008 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3009 printk(KERN_WARNING "cciss: unable to get board into"
3010 " simple mode\n");
3011 err = -ENODEV;
3012 goto err_out_free_res;
3013 }
3014 return 0;
3015
3016 err_out_free_res:
3017 pci_release_regions(pdev);
3018
3019 err_out_disable_pdev:
3020 pci_disable_device(pdev);
3021 return err;
3022 }
3023
3024 /*
3025 * Gets information about the local volumes attached to the controller.
3026 */
3027 static void cciss_getgeometry(int cntl_num)
3028 {
3029 ReportLunData_struct *ld_buff;
3030 InquiryData_struct *inq_buff;
3031 int return_code;
3032 int i;
3033 int listlength = 0;
3034 __u32 lunid = 0;
3035 int block_size;
3036 sector_t total_size;
3037
3038 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3039 if (ld_buff == NULL) {
3040 printk(KERN_ERR "cciss: out of memory\n");
3041 return;
3042 }
3043 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3044 if (inq_buff == NULL) {
3045 printk(KERN_ERR "cciss: out of memory\n");
3046 kfree(ld_buff);
3047 return;
3048 }
3049 /* Get the firmware version */
3050 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3051 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3052 TYPE_CMD);
3053 if (return_code == IO_OK) {
3054 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3055 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3056 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3057 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3058 } else { /* send command failed */
3059
3060 printk(KERN_WARNING "cciss: unable to determine firmware"
3061 " version of controller\n");
3062 }
3063 /* Get the number of logical volumes */
3064 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3065 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3066 TYPE_CMD);
3067
3068 if (return_code == IO_OK) {
3069 #ifdef CCISS_DEBUG
3070 printk("LUN Data\n--------------------------\n");
3071 #endif /* CCISS_DEBUG */
3072
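/* The LUN list length is a big-endian 32-bit byte count; assemble
 * it from the four individual length bytes. */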
3073 listlength |=
3074 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3075 listlength |=
3076 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3077 listlength |=
3078 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3079 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3080 } else { /* reading number of logical volumes failed */
3081
3082 printk(KERN_WARNING "cciss: report logical volume"
3083 " command failed\n");
3084 listlength = 0;
3085 }
3086 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3087 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3088 printk(KERN_ERR
3089 "ciss: only %d number of logical volumes supported\n",
3090 CISS_MAX_LUN);
3091 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3092 }
3093 #ifdef CCISS_DEBUG
3094 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3095 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3096 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3097 hba[cntl_num]->num_luns);
3098 #endif /* CCISS_DEBUG */
3099
3100 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3101 for (i = 0; i < CISS_MAX_LUN; i++) {
3102 if (i < hba[cntl_num]->num_luns) {
3103 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3104 << 24;
3105 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3106 << 16;
3107 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3108 << 8;
3109 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3110
3111 hba[cntl_num]->drv[i].LunID = lunid;
3112
3113 #ifdef CCISS_DEBUG
3114 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3115 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3116 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3117 hba[cntl_num]->drv[i].LunID);
3118 #endif /* CCISS_DEBUG */
3119
3120 /* testing to see if 16-byte CDBs are already being used */
3121 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3122 cciss_read_capacity_16(cntl_num, i, 0,
3123 &total_size, &block_size);
3124 goto geo_inq;
3125 }
3126 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3127
3128 /* total_size = last LBA + 1 */
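/* A wrapped total of 0 from the 10-byte READ CAPACITY means the
 * last LBA did not fit in 32 bits (volumes over 2 TB with 512-byte
 * blocks), so switch to 16-byte CDBs and ask again. */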
3129 if(total_size == (__u32) 0) {
3130 cciss_read_capacity_16(cntl_num, i, 0,
3131 &total_size, &block_size);
3132 hba[cntl_num]->cciss_read = CCISS_READ_16;
3133 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3134 } else {
3135 hba[cntl_num]->cciss_read = CCISS_READ_10;
3136 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3137 }
3138 geo_inq:
3139 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3140 block_size, inq_buff,
3141 &hba[cntl_num]->drv[i]);
3142 } else {
3143 /* initialize raid_level to indicate a free space */
3144 hba[cntl_num]->drv[i].raid_level = -1;
3145 }
3146 }
3147 kfree(ld_buff);
3148 kfree(inq_buff);
3149 }
3150
3151 /* Function to find the first free pointer into our hba[] array */
3152 /* Returns -1 if no free entries are left. */
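/* All NWD gendisks are allocated before an hba[] slot is claimed;
 * if no slot is free or an allocation fails, every disk allocated
 * so far is released again with put_disk(). */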
3153 static int alloc_cciss_hba(void)
3154 {
3155 struct gendisk *disk[NWD];
3156 int i, n;
3157 for (n = 0; n < NWD; n++) {
3158 disk[n] = alloc_disk(1 << NWD_SHIFT);
3159 if (!disk[n])
3160 goto out;
3161 }
3162
3163 for (i = 0; i < MAX_CTLR; i++) {
3164 if (!hba[i]) {
3165 ctlr_info_t *p;
3166 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3167 if (!p)
3168 goto Enomem;
3169 for (n = 0; n < NWD; n++)
3170 p->gendisk[n] = disk[n];
3171 hba[i] = p;
3172 return i;
3173 }
3174 }
3175 printk(KERN_WARNING "cciss: This driver supports a maximum"
3176 " of %d controllers.\n", MAX_CTLR);
3177 goto out;
3178 Enomem:
3179 printk(KERN_ERR "cciss: out of memory.\n");
3180 out:
3181 while (n--)
3182 put_disk(disk[n]);
3183 return -1;
3184 }
3185
3186 static void free_hba(int i)
3187 {
3188 ctlr_info_t *p = hba[i];
3189 int n;
3190
3191 hba[i] = NULL;
3192 for (n = 0; n < NWD; n++)
3193 put_disk(p->gendisk[n]);
3194 kfree(p);
3195 }
3196
3197 /*
3198 * This is it. Find all the controllers and register them. I really hate
3199 * stealing all these major device numbers.
3200 * Returns 1 on success and -1 on failure.
3201 */
3202 static int __devinit cciss_init_one(struct pci_dev *pdev,
3203 const struct pci_device_id *ent)
3204 {
3205 request_queue_t *q;
3206 int i;
3207 int j;
3208 int rc;
3209 int dac;
3210
3211 i = alloc_cciss_hba();
3212 if (i < 0)
3213 return -1;
3214
3215 hba[i]->busy_initializing = 1;
3216
3217 if (cciss_pci_init(hba[i], pdev) != 0)
3218 goto clean1;
3219
3220 sprintf(hba[i]->devname, "cciss%d", i);
3221 hba[i]->ctlr = i;
3222 hba[i]->pdev = pdev;
3223
3224 /* configure PCI DMA stuff */
3225 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3226 dac = 1;
3227 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3228 dac = 0;
3229 else {
3230 printk(KERN_ERR "cciss: no suitable DMA available\n");
3231 goto clean1;
3232 }
3233
3234 /*
3235 * register with the major number, or get a dynamic major number
3236 * by passing 0 as argument. This is done for greater than
3237 * 8 controller support.
3238 */
3239 if (i < MAX_CTLR_ORIG)
3240 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3241 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3242 if (rc == -EBUSY || rc == -EINVAL) {
3243 printk(KERN_ERR
3244 "cciss: Unable to get major number %d for %s "
3245 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3246 goto clean1;
3247 } else {
3248 if (i >= MAX_CTLR_ORIG)
3249 hba[i]->major = rc;
3250 }
3251
3252 /* make sure the board interrupts are off */
3253 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3254 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3255 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3256 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3257 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3258 goto clean2;
3259 }
3260
3261 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3262 hba[i]->devname, pdev->device, pci_name(pdev),
3263 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3264
3265 hba[i]->cmd_pool_bits =
3266 kmalloc(((NR_CMDS + BITS_PER_LONG -
3267 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3268 hba[i]->cmd_pool = (CommandList_struct *)
3269 pci_alloc_consistent(hba[i]->pdev,
3270 NR_CMDS * sizeof(CommandList_struct),
3271 &(hba[i]->cmd_pool_dhandle));
3272 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3273 pci_alloc_consistent(hba[i]->pdev,
3274 NR_CMDS * sizeof(ErrorInfo_struct),
3275 &(hba[i]->errinfo_pool_dhandle));
3276 if ((hba[i]->cmd_pool_bits == NULL)
3277 || (hba[i]->cmd_pool == NULL)
3278 || (hba[i]->errinfo_pool == NULL)) {
3279 printk(KERN_ERR "cciss: out of memory");
3280 goto clean4;
3281 }
3282 #ifdef CONFIG_CISS_SCSI_TAPE
3283 hba[i]->scsi_rejects.complete =
3284 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3285 (NR_CMDS + 5), GFP_KERNEL);
3286 if (hba[i]->scsi_rejects.complete == NULL) {
3287 printk(KERN_ERR "cciss: out of memory");
3288 goto clean4;
3289 }
3290 #endif
3291 spin_lock_init(&hba[i]->lock);
3292
3293 /* Initialize the pdev driver private data.
3294 have it point to hba[i]. */
3295 pci_set_drvdata(pdev, hba[i]);
3296 /* command and error info recs zeroed out before
3297 they are used */
3298 memset(hba[i]->cmd_pool_bits, 0,
3299 ((NR_CMDS + BITS_PER_LONG -
3300 1) / BITS_PER_LONG) * sizeof(unsigned long));
3301
3302 #ifdef CCISS_DEBUG
3303 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3304 #endif /* CCISS_DEBUG */
3305
3306 cciss_getgeometry(i);
3307
3308 cciss_scsi_setup(i);
3309
3310 /* Turn the interrupts on so we can service requests */
3311 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3312
3313 cciss_procinit(i);
3314 hba[i]->busy_initializing = 0;
3315
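/* Set up a request queue and gendisk for each logical drive.  Drive 0
 * (cciss/cXd0) is registered even when it has no media so that the
 * online array configuration utilities can still reach the controller. */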
3316 for (j = 0; j < NWD; j++) { /* mfm */
3317 drive_info_struct *drv = &(hba[i]->drv[j]);
3318 struct gendisk *disk = hba[i]->gendisk[j];
3319
3320 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3321 if (!q) {
3322 printk(KERN_ERR
3323 "cciss: unable to allocate queue for disk %d\n",
3324 j);
3325 break;
3326 }
3327 drv->queue = q;
3328
3329 q->backing_dev_info.ra_pages = READ_AHEAD;
3330 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3331
3332 /* This is a hardware imposed limit. */
3333 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3334
3335 /* This is a limit in the driver and could be eliminated. */
3336 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3337
3338 blk_queue_max_sectors(q, 512);
3339
3340 blk_queue_softirq_done(q, cciss_softirq_done);
3341
3342 q->queuedata = hba[i];
3343 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3344 disk->major = hba[i]->major;
3345 disk->first_minor = j << NWD_SHIFT;
3346 disk->fops = &cciss_fops;
3347 disk->queue = q;
3348 disk->private_data = drv;
3349 disk->driverfs_dev = &pdev->dev;
3350 /* we must register the controller even if no disks exist */
3351 /* this is for the online array utilities */
3352 if (!drv->heads && j)
3353 continue;
3354 blk_queue_hardsect_size(q, drv->block_size);
3355 set_capacity(disk, drv->nr_blocks);
3356 add_disk(disk);
3357 }
3358
3359 return 1;
3360
3361 clean4:
3362 #ifdef CONFIG_CISS_SCSI_TAPE
3363 kfree(hba[i]->scsi_rejects.complete);
3364 #endif
3365 kfree(hba[i]->cmd_pool_bits);
3366 if (hba[i]->cmd_pool)
3367 pci_free_consistent(hba[i]->pdev,
3368 NR_CMDS * sizeof(CommandList_struct),
3369 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3370 if (hba[i]->errinfo_pool)
3371 pci_free_consistent(hba[i]->pdev,
3372 NR_CMDS * sizeof(ErrorInfo_struct),
3373 hba[i]->errinfo_pool,
3374 hba[i]->errinfo_pool_dhandle);
3375 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3376 clean2:
3377 unregister_blkdev(hba[i]->major, hba[i]->devname);
3378 clean1:
3379 hba[i]->busy_initializing = 0;
3380 free_hba(i);
3381 return -1;
3382 }
3383
3384 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3385 {
3386 ctlr_info_t *tmp_ptr;
3387 int i, j;
3388 char flush_buf[4];
3389 int return_code;
3390
3391 if (pci_get_drvdata(pdev) == NULL) {
3392 printk(KERN_ERR "cciss: Unable to remove device \n");
3393 return;
3394 }
3395 tmp_ptr = pci_get_drvdata(pdev);
3396 i = tmp_ptr->ctlr;
3397 if (hba[i] == NULL) {
3398 printk(KERN_ERR "cciss: device appears to "
3399 "already be removed \n");
3400 return;
3401 }
3402 /* Turn board interrupts off and send the flush cache command */
3403 /* sendcmd will turn off interrupts, and send the flush...
3404 * to write all data in the battery-backed cache to disk */
3405 memset(flush_buf, 0, 4);
3406 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3407 TYPE_CMD);
3408 if (return_code != IO_OK) {
3409 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3410 i);
3411 }
3412 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3413
3414 #ifdef CONFIG_PCI_MSI
3415 if (hba[i]->msix_vector)
3416 pci_disable_msix(hba[i]->pdev);
3417 else if (hba[i]->msi_vector)
3418 pci_disable_msi(hba[i]->pdev);
3419 #endif /* CONFIG_PCI_MSI */
3420
3421 iounmap(hba[i]->vaddr);
3422 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3423 unregister_blkdev(hba[i]->major, hba[i]->devname);
3424 remove_proc_entry(hba[i]->devname, proc_cciss);
3425
3426 /* remove it from the disk list */
3427 for (j = 0; j < NWD; j++) {
3428 struct gendisk *disk = hba[i]->gendisk[j];
3429 if (disk) {
3430 request_queue_t *q = disk->queue;
3431
3432 if (disk->flags & GENHD_FL_UP)
3433 del_gendisk(disk);
3434 if (q)
3435 blk_cleanup_queue(q);
3436 }
3437 }
3438
3439 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3440 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3441 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3442 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3443 kfree(hba[i]->cmd_pool_bits);
3444 #ifdef CONFIG_CISS_SCSI_TAPE
3445 kfree(hba[i]->scsi_rejects.complete);
3446 #endif
3447 pci_release_regions(pdev);
3448 pci_disable_device(pdev);
3449 pci_set_drvdata(pdev, NULL);
3450 free_hba(i);
3451 }
3452
3453 static struct pci_driver cciss_pci_driver = {
3454 .name = "cciss",
3455 .probe = cciss_init_one,
3456 .remove = __devexit_p(cciss_remove_one),
3457 .id_table = cciss_pci_device_id, /* id_table */
3458 };
3459
3460 /*
3461 * This is it. Register the PCI driver information for the cards we control;
3462 * the OS will call our registered routines when it finds one of our cards.
3463 */
3464 static int __init cciss_init(void)
3465 {
3466 printk(KERN_INFO DRIVER_NAME "\n");
3467
3468 /* Register for our PCI devices */
3469 return pci_register_driver(&cciss_pci_driver);
3470 }
3471
3472 static void __exit cciss_cleanup(void)
3473 {
3474 int i;
3475
3476 pci_unregister_driver(&cciss_pci_driver);
3477 /* double check that all controller entries have been removed */
3478 for (i = 0; i < MAX_CTLR; i++) {
3479 if (hba[i] != NULL) {
3480 printk(KERN_WARNING "cciss: had to remove"
3481 " controller %d\n", i);
3482 cciss_remove_one(hba[i]->pdev);
3483 }
3484 }
3485 remove_proc_entry("cciss", proc_root_driver);
3486 }
3487
3488 static void fail_all_cmds(unsigned long ctlr)
3489 {
3490 /* If we get here, the board is apparently dead. */
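/* Everything still on the request queue is moved to the completion
 * queue, and every queued command is then failed with
 * CMD_HARDWARE_ERR so the upper layers see their I/O complete. */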
3491 ctlr_info_t *h = hba[ctlr];
3492 CommandList_struct *c;
3493 unsigned long flags;
3494
3495 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3496 h->alive = 0; /* the controller apparently died... */
3497
3498 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3499
3500 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3501
3502 /* move everything off the request queue onto the completed queue */
3503 while ((c = h->reqQ) != NULL) {
3504 removeQ(&(h->reqQ), c);
3505 h->Qdepth--;
3506 addQ(&(h->cmpQ), c);
3507 }
3508
3509 /* Now, fail everything on the completed queue with a HW error */
3510 while ((c = h->cmpQ) != NULL) {
3511 removeQ(&h->cmpQ, c);
3512 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3513 if (c->cmd_type == CMD_RWREQ) {
3514 complete_command(h, c, 0);
3515 } else if (c->cmd_type == CMD_IOCTL_PEND)
3516 complete(c->waiting);
3517 #ifdef CONFIG_CISS_SCSI_TAPE
3518 else if (c->cmd_type == CMD_SCSI)
3519 complete_scsi_command(c, 0, 0);
3520 #endif
3521 }
3522 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3523 return;
3524 }
3525
3526 module_init(cciss_init);
3527 module_exit(cciss_cleanup);