1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
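/*
 * For reference (a worked example, not part of the original driver source):
 * the macro above packs major.minor.submin into a single integer, so
 * CCISS_DRIVER_VERSION(3,6,10) evaluates to (3<<16)|(6<<8)|10 = 0x03060A.
 * This is the value the CCISS_GETDRIVVER ioctl below returns to userspace
 * as DriverVer_type.
 */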
52
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i E500");
58 MODULE_LICENSE("GPL");
59
60 #include "cciss_cmd.h"
61 #include "cciss.h"
62 #include <linux/cciss_ioctl.h>
63
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
75 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
85 {0,}
86 };
87
88 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
89
90 /* board_id = Subsystem Device ID and Vendor ID, packed as one 32-bit value
91 * product = Marketing Name for the board
92 * access = Address of the struct of function pointers
93 */
94 static struct board_type products[] = {
95 {0x40700E11, "Smart Array 5300", &SA5_access},
96 {0x40800E11, "Smart Array 5i", &SA5B_access},
97 {0x40820E11, "Smart Array 532", &SA5B_access},
98 {0x40830E11, "Smart Array 5312", &SA5B_access},
99 {0x409A0E11, "Smart Array 641", &SA5_access},
100 {0x409B0E11, "Smart Array 642", &SA5_access},
101 {0x409C0E11, "Smart Array 6400", &SA5_access},
102 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
103 {0x40910E11, "Smart Array 6i", &SA5_access},
104 {0x3225103C, "Smart Array P600", &SA5_access},
105 {0x3223103C, "Smart Array P800", &SA5_access},
106 {0x3234103C, "Smart Array P400", &SA5_access},
107 {0x3235103C, "Smart Array P400i", &SA5_access},
108 {0x3211103C, "Smart Array E200i", &SA5_access},
109 {0x3212103C, "Smart Array E200", &SA5_access},
110 {0x3213103C, "Smart Array E200i", &SA5_access},
111 {0x3214103C, "Smart Array E200i", &SA5_access},
112 {0x3215103C, "Smart Array E200i", &SA5_access},
113 {0x3233103C, "Smart Array E500", &SA5_access},
114 };
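/*
 * Illustration (inferred from the two tables above): board_id packs the PCI
 * subsystem device ID into the upper 16 bits and the subsystem vendor ID into
 * the lower 16 bits.  For example, 0x40700E11 ("Smart Array 5300") matches the
 * first cciss_pci_device_id entry, which has subvendor 0x0E11 and subdevice
 * 0x4070.
 */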
115
116 /* How long to wait (in milliseconds) for board to go into simple mode */
117 #define MAX_CONFIG_WAIT 30000
118 #define MAX_IOCTL_CONFIG_WAIT 1000
119
120 /* define how many times we will retry a command because of bus resets */
121 #define MAX_CMD_RETRIES 3
122
123 #define READ_AHEAD 1024
124 #define NR_CMDS 384 /* #commands that can be outstanding */
125 #define MAX_CTLR 32
126
127 /* Originally the cciss driver only supported 8 major numbers */
128 #define MAX_CTLR_ORIG 8
129
130 static ctlr_info_t *hba[MAX_CTLR];
131
132 static void do_cciss_request(request_queue_t *q);
133 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
134 static int cciss_open(struct inode *inode, struct file *filep);
135 static int cciss_release(struct inode *inode, struct file *filep);
136 static int cciss_ioctl(struct inode *inode, struct file *filep,
137 unsigned int cmd, unsigned long arg);
138 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
139
140 static int revalidate_allvol(ctlr_info_t *host);
141 static int cciss_revalidate(struct gendisk *disk);
142 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
143 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
144 int clear_all);
145
146 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
147 sector_t *total_size, unsigned int *block_size);
148 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
149 sector_t *total_size, unsigned int *block_size);
150 static void cciss_geometry_inquiry(int ctlr, int logvol,
151 int withirq, sector_t total_size,
152 unsigned int block_size, InquiryData_struct *inq_buff,
153 drive_info_struct *drv);
154 static void cciss_getgeometry(int cntl_num);
155 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
156 __u32);
157 static void start_io(ctlr_info_t *h);
158 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
159 unsigned int use_unit_num, unsigned int log_unit,
160 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
161 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
162 unsigned int use_unit_num, unsigned int log_unit,
163 __u8 page_code, int cmd_type);
164
165 static void fail_all_cmds(unsigned long ctlr);
166
167 #ifdef CONFIG_PROC_FS
168 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
169 int length, int *eof, void *data);
170 static void cciss_procinit(int i);
171 #else
172 static void cciss_procinit(int i)
173 {
174 }
175 #endif /* CONFIG_PROC_FS */
176
177 #ifdef CONFIG_COMPAT
178 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
179 #endif
180
181 static struct block_device_operations cciss_fops = {
182 .owner = THIS_MODULE,
183 .open = cciss_open,
184 .release = cciss_release,
185 .ioctl = cciss_ioctl,
186 .getgeo = cciss_getgeo,
187 #ifdef CONFIG_COMPAT
188 .compat_ioctl = cciss_compat_ioctl,
189 #endif
190 .revalidate_disk = cciss_revalidate,
191 };
192
193 /*
194 * Enqueuing and dequeuing functions for cmdlists.
195 */
196 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
197 {
198 if (*Qptr == NULL) {
199 *Qptr = c;
200 c->next = c->prev = c;
201 } else {
202 c->prev = (*Qptr)->prev;
203 c->next = (*Qptr);
204 (*Qptr)->prev->next = c;
205 (*Qptr)->prev = c;
206 }
207 }
208
209 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
210 CommandList_struct *c)
211 {
212 if (c && c->next != c) {
213 if (*Qptr == c)
214 *Qptr = c->next;
215 c->prev->next = c->next;
216 c->next->prev = c->prev;
217 } else {
218 *Qptr = NULL;
219 }
220 return c;
221 }
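/*
 * Usage sketch (mirrors the ioctl and sendcmd paths later in this file):
 * addQ()/removeQ() maintain a circular, doubly linked list headed by *Qptr,
 * and callers manipulate it only while holding the controller lock, e.g.:
 *
 *	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
 *	addQ(&h->reqQ, c);
 *	h->Qdepth++;
 *	start_io(h);
 *	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 */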
222
223 #include "cciss_scsi.c" /* For SCSI tape support */
224
225 #ifdef CONFIG_PROC_FS
226
227 /*
228 * Report information about this controller.
229 */
230 #define ENG_GIG 1000000000
231 #define ENG_GIG_FACTOR (ENG_GIG/512)
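/*
 * Note: ENG_GIG_FACTOR (10^9 / 512) converts a count of 512-byte sectors into
 * decimal ("engineering") gigabytes for the /proc output below.  For example,
 * a volume of 1,953,125,000 sectors is 1,953,125,000 / 1,953,125 = 1000 GB.
 */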
232 #define RAID_UNKNOWN 6
233 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
234 "UNKNOWN"
235 };
236
237 static struct proc_dir_entry *proc_cciss;
238
239 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
240 int length, int *eof, void *data)
241 {
242 off_t pos = 0;
243 off_t len = 0;
244 int size, i, ctlr;
245 ctlr_info_t *h = (ctlr_info_t *) data;
246 drive_info_struct *drv;
247 unsigned long flags;
248 sector_t vol_sz, vol_sz_frac;
249
250 ctlr = h->ctlr;
251
252 /* prevent displaying bogus info during configuration
253 * or deconfiguration of a logical volume
254 */
255 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
256 if (h->busy_configuring) {
257 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
258 return -EBUSY;
259 }
260 h->busy_configuring = 1;
261 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
262
263 size = sprintf(buffer, "%s: HP %s Controller\n"
264 "Board ID: 0x%08lx\n"
265 "Firmware Version: %c%c%c%c\n"
266 "IRQ: %d\n"
267 "Logical drives: %d\n"
268 "Current Q depth: %d\n"
269 "Current # commands on controller: %d\n"
270 "Max Q depth since init: %d\n"
271 "Max # commands on controller since init: %d\n"
272 "Max SG entries since init: %d\n\n",
273 h->devname,
274 h->product_name,
275 (unsigned long)h->board_id,
276 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
277 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
278 h->num_luns, h->Qdepth, h->commands_outstanding,
279 h->maxQsinceinit, h->max_outstanding, h->maxSG);
280
281 pos += size;
282 len += size;
283 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
284 for (i = 0; i <= h->highest_lun; i++) {
285
286 drv = &h->drv[i];
287 if (drv->heads == 0)
288 continue;
289
290 vol_sz = drv->nr_blocks;
291 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
292 vol_sz_frac *= 100;
293 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
294
295 if (drv->raid_level > 5)
296 drv->raid_level = RAID_UNKNOWN;
297 size = sprintf(buffer + len, "cciss/c%dd%d:"
298 "\t%4u.%02uGB\tRAID %s\n",
299 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
300 raid_label[drv->raid_level]);
301 pos += size;
302 len += size;
303 }
304
305 *eof = 1;
306 *start = buffer + offset;
307 len -= offset;
308 if (len > length)
309 len = length;
310 h->busy_configuring = 0;
311 return len;
312 }
313
314 static int
315 cciss_proc_write(struct file *file, const char __user *buffer,
316 unsigned long count, void *data)
317 {
318 unsigned char cmd[80];
319 int len;
320 #ifdef CONFIG_CISS_SCSI_TAPE
321 ctlr_info_t *h = (ctlr_info_t *) data;
322 int rc;
323 #endif
324
325 if (count > sizeof(cmd) - 1)
326 return -EINVAL;
327 if (copy_from_user(cmd, buffer, count))
328 return -EFAULT;
329 cmd[count] = '\0';
330 len = strlen(cmd); // above 3 lines ensure safety
331 if (len && cmd[len - 1] == '\n')
332 cmd[--len] = '\0';
333 # ifdef CONFIG_CISS_SCSI_TAPE
334 if (strcmp("engage scsi", cmd) == 0) {
335 rc = cciss_engage_scsi(h->ctlr);
336 if (rc != 0)
337 return -rc;
338 return count;
339 }
340 /* might be nice to have "disengage" too, but it's not
341 safely possible. (only 1 module use count, lock issues.) */
342 # endif
343 return -EINVAL;
344 }
345
346 /*
347 * Get us a file in /proc/cciss that says something about each controller.
348 * Create /proc/cciss if it doesn't exist yet.
349 */
350 static void __devinit cciss_procinit(int i)
351 {
352 struct proc_dir_entry *pde;
353
354 if (proc_cciss == NULL) {
355 proc_cciss = proc_mkdir("cciss", proc_root_driver);
356 if (!proc_cciss)
357 return;
358 }
359
360 pde = create_proc_read_entry(hba[i]->devname,
361 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
362 proc_cciss, cciss_proc_get_info, hba[i]);
363 if (pde) pde->write_proc = cciss_proc_write;
364 }
365 #endif /* CONFIG_PROC_FS */
366
367 /*
368 * For operations that cannot sleep, a command block is allocated at init,
369 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
370 * which ones are free or in use. For operations that can tolerate kmalloc
371 * possibly sleeping, this routine can be called with get_from_pool set to 0.
372 * cmd_free() MUST then be called with got_from_pool set to 0 as well.
373 */
374 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
375 {
376 CommandList_struct *c;
377 int i;
378 u64bit temp64;
379 dma_addr_t cmd_dma_handle, err_dma_handle;
380
381 if (!get_from_pool) {
382 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
383 sizeof(CommandList_struct), &cmd_dma_handle);
384 if (c == NULL)
385 return NULL;
386 memset(c, 0, sizeof(CommandList_struct));
387
388 c->cmdindex = -1;
389
390 c->err_info = (ErrorInfo_struct *)
391 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
392 &err_dma_handle);
393
394 if (c->err_info == NULL) {
395 pci_free_consistent(h->pdev,
396 sizeof(CommandList_struct), c, cmd_dma_handle);
397 return NULL;
398 }
399 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
400 } else { /* get it out of the controller's pool */
401
402 do {
403 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
404 if (i == NR_CMDS)
405 return NULL;
406 } while (test_and_set_bit
407 (i & (BITS_PER_LONG - 1),
408 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
409 #ifdef CCISS_DEBUG
410 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
411 #endif
412 c = h->cmd_pool + i;
413 memset(c, 0, sizeof(CommandList_struct));
414 cmd_dma_handle = h->cmd_pool_dhandle
415 + i * sizeof(CommandList_struct);
416 c->err_info = h->errinfo_pool + i;
417 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
418 err_dma_handle = h->errinfo_pool_dhandle
419 + i * sizeof(ErrorInfo_struct);
420 h->nr_allocs++;
421
422 c->cmdindex = i;
423 }
424
425 c->busaddr = (__u32) cmd_dma_handle;
426 temp64.val = (__u64) err_dma_handle;
427 c->ErrDesc.Addr.lower = temp64.val32.lower;
428 c->ErrDesc.Addr.upper = temp64.val32.upper;
429 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
430
431 c->ctlr = h->ctlr;
432 return c;
433 }
434
435 /*
436 * Frees a command block that was previously allocated with cmd_alloc().
437 */
438 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
439 {
440 int i;
441 u64bit temp64;
442
443 if (!got_from_pool) {
444 temp64.val32.lower = c->ErrDesc.Addr.lower;
445 temp64.val32.upper = c->ErrDesc.Addr.upper;
446 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
447 c->err_info, (dma_addr_t) temp64.val);
448 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
449 c, (dma_addr_t) c->busaddr);
450 } else {
451 i = c - h->cmd_pool;
452 clear_bit(i & (BITS_PER_LONG - 1),
453 h->cmd_pool_bits + (i / BITS_PER_LONG));
454 h->nr_frees++;
455 }
456 }
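/*
 * Usage sketch (illustration only): the two allocation styles must be paired,
 * as the comment above cmd_alloc() requires.  Sleeping callers such as the
 * ioctl paths use
 *
 *	c = cmd_alloc(h, 0);	...	cmd_free(h, c, 0);
 *
 * (one pci_alloc_consistent() per command), while completion paths such as
 * cciss_softirq_done() use cmd_free(h, c, 1) to return a command to the
 * preallocated pool tracked by the h->cmd_pool_bits bitmap.
 */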
457
458 static inline ctlr_info_t *get_host(struct gendisk *disk)
459 {
460 return disk->queue->queuedata;
461 }
462
463 static inline drive_info_struct *get_drv(struct gendisk *disk)
464 {
465 return disk->private_data;
466 }
467
468 /*
469 * Open. Make sure the device is really there.
470 */
471 static int cciss_open(struct inode *inode, struct file *filep)
472 {
473 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
474 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
475
476 #ifdef CCISS_DEBUG
477 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
478 #endif /* CCISS_DEBUG */
479
480 if (host->busy_initializing || drv->busy_configuring)
481 return -EBUSY;
482 /*
483 * Root is allowed to open raw volume zero even if it's not configured
484 * so array config can still work. Root is also allowed to open any
485 * volume that has a LUN ID, so it can issue an IOCTL to reread the
486 * disk information. I don't think I really like this,
487 * but I'm already using way too many device nodes to claim another one
488 * for "raw controller".
489 */
490 if (drv->nr_blocks == 0) {
491 if (iminor(inode) != 0) { /* not node 0? */
492 /* if not node 0, make sure it is partition 0 */
493 if (iminor(inode) & 0x0f) {
494 return -ENXIO;
495 /* if it is, make sure we have a LUN ID */
496 } else if (drv->LunID == 0) {
497 return -ENXIO;
498 }
499 }
500 if (!capable(CAP_SYS_ADMIN))
501 return -EPERM;
502 }
503 drv->usage_count++;
504 host->usage_count++;
505 return 0;
506 }
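/*
 * Note (an assumption inferred from the iminor() checks above): the low four
 * bits of a cciss minor number select the partition, so a non-zero
 * "iminor(inode) & 0x0f" means the node refers to a partition rather than to
 * the whole logical volume.
 */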
507
508 /*
509 * Close. Sync first.
510 */
511 static int cciss_release(struct inode *inode, struct file *filep)
512 {
513 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
514 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
515
516 #ifdef CCISS_DEBUG
517 printk(KERN_DEBUG "cciss_release %s\n",
518 inode->i_bdev->bd_disk->disk_name);
519 #endif /* CCISS_DEBUG */
520
521 drv->usage_count--;
522 host->usage_count--;
523 return 0;
524 }
525
526 #ifdef CONFIG_COMPAT
527
528 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
529 {
530 int ret;
531 lock_kernel();
532 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
533 unlock_kernel();
534 return ret;
535 }
536
537 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
538 unsigned long arg);
539 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
540 unsigned long arg);
541
542 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
543 {
544 switch (cmd) {
545 case CCISS_GETPCIINFO:
546 case CCISS_GETINTINFO:
547 case CCISS_SETINTINFO:
548 case CCISS_GETNODENAME:
549 case CCISS_SETNODENAME:
550 case CCISS_GETHEARTBEAT:
551 case CCISS_GETBUSTYPES:
552 case CCISS_GETFIRMVER:
553 case CCISS_GETDRIVVER:
554 case CCISS_REVALIDVOLS:
555 case CCISS_DEREGDISK:
556 case CCISS_REGNEWDISK:
557 case CCISS_REGNEWD:
558 case CCISS_RESCANDISK:
559 case CCISS_GETLUNINFO:
560 return do_ioctl(f, cmd, arg);
561
562 case CCISS_PASSTHRU32:
563 return cciss_ioctl32_passthru(f, cmd, arg);
564 case CCISS_BIG_PASSTHRU32:
565 return cciss_ioctl32_big_passthru(f, cmd, arg);
566
567 default:
568 return -ENOIOCTLCMD;
569 }
570 }
571
572 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
573 unsigned long arg)
574 {
575 IOCTL32_Command_struct __user *arg32 =
576 (IOCTL32_Command_struct __user *) arg;
577 IOCTL_Command_struct arg64;
578 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
579 int err;
580 u32 cp;
581
582 err = 0;
583 err |=
584 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
585 sizeof(arg64.LUN_info));
586 err |=
587 copy_from_user(&arg64.Request, &arg32->Request,
588 sizeof(arg64.Request));
589 err |=
590 copy_from_user(&arg64.error_info, &arg32->error_info,
591 sizeof(arg64.error_info));
592 err |= get_user(arg64.buf_size, &arg32->buf_size);
593 err |= get_user(cp, &arg32->buf);
594 arg64.buf = compat_ptr(cp);
595 err |= copy_to_user(p, &arg64, sizeof(arg64));
596
597 if (err)
598 return -EFAULT;
599
600 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
601 if (err)
602 return err;
603 err |=
604 copy_in_user(&arg32->error_info, &p->error_info,
605 sizeof(arg32->error_info));
606 if (err)
607 return -EFAULT;
608 return err;
609 }
610
611 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
612 unsigned long arg)
613 {
614 BIG_IOCTL32_Command_struct __user *arg32 =
615 (BIG_IOCTL32_Command_struct __user *) arg;
616 BIG_IOCTL_Command_struct arg64;
617 BIG_IOCTL_Command_struct __user *p =
618 compat_alloc_user_space(sizeof(arg64));
619 int err;
620 u32 cp;
621
622 err = 0;
623 err |=
624 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
625 sizeof(arg64.LUN_info));
626 err |=
627 copy_from_user(&arg64.Request, &arg32->Request,
628 sizeof(arg64.Request));
629 err |=
630 copy_from_user(&arg64.error_info, &arg32->error_info,
631 sizeof(arg64.error_info));
632 err |= get_user(arg64.buf_size, &arg32->buf_size);
633 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
634 err |= get_user(cp, &arg32->buf);
635 arg64.buf = compat_ptr(cp);
636 err |= copy_to_user(p, &arg64, sizeof(arg64));
637
638 if (err)
639 return -EFAULT;
640
641 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
642 if (err)
643 return err;
644 err |=
645 copy_in_user(&arg32->error_info, &p->error_info,
646 sizeof(arg32->error_info));
647 if (err)
648 return -EFAULT;
649 return err;
650 }
651 #endif
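/*
 * Note on the two compat helpers above (descriptive only): each copies the
 * 32-bit ioctl structure field by field into its 64-bit counterpart allocated
 * with compat_alloc_user_space(), widens the 32-bit buf pointer with
 * compat_ptr(), and then forwards the request to the native CCISS_PASSTHRU /
 * CCISS_BIG_PASSTHRU handler through do_ioctl().
 */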
652
653 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
654 {
655 drive_info_struct *drv = get_drv(bdev->bd_disk);
656
657 if (!drv->cylinders)
658 return -ENXIO;
659
660 geo->heads = drv->heads;
661 geo->sectors = drv->sectors;
662 geo->cylinders = drv->cylinders;
663 return 0;
664 }
665
666 /*
667 * ioctl
668 */
669 static int cciss_ioctl(struct inode *inode, struct file *filep,
670 unsigned int cmd, unsigned long arg)
671 {
672 struct block_device *bdev = inode->i_bdev;
673 struct gendisk *disk = bdev->bd_disk;
674 ctlr_info_t *host = get_host(disk);
675 drive_info_struct *drv = get_drv(disk);
676 int ctlr = host->ctlr;
677 void __user *argp = (void __user *)arg;
678
679 #ifdef CCISS_DEBUG
680 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
681 #endif /* CCISS_DEBUG */
682
683 switch (cmd) {
684 case CCISS_GETPCIINFO:
685 {
686 cciss_pci_info_struct pciinfo;
687
688 if (!arg)
689 return -EINVAL;
690 pciinfo.domain = pci_domain_nr(host->pdev->bus);
691 pciinfo.bus = host->pdev->bus->number;
692 pciinfo.dev_fn = host->pdev->devfn;
693 pciinfo.board_id = host->board_id;
694 if (copy_to_user
695 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
696 return -EFAULT;
697 return 0;
698 }
699 case CCISS_GETINTINFO:
700 {
701 cciss_coalint_struct intinfo;
702 if (!arg)
703 return -EINVAL;
704 intinfo.delay =
705 readl(&host->cfgtable->HostWrite.CoalIntDelay);
706 intinfo.count =
707 readl(&host->cfgtable->HostWrite.CoalIntCount);
708 if (copy_to_user
709 (argp, &intinfo, sizeof(cciss_coalint_struct)))
710 return -EFAULT;
711 return 0;
712 }
713 case CCISS_SETINTINFO:
714 {
715 cciss_coalint_struct intinfo;
716 unsigned long flags;
717 int i;
718
719 if (!arg)
720 return -EINVAL;
721 if (!capable(CAP_SYS_ADMIN))
722 return -EPERM;
723 if (copy_from_user
724 (&intinfo, argp, sizeof(cciss_coalint_struct)))
725 return -EFAULT;
726 if ((intinfo.delay == 0) && (intinfo.count == 0))
727 {
728 // printk("cciss_ioctl: delay and count cannot be 0\n");
729 return -EINVAL;
730 }
731 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
732 /* Update the field, and then ring the doorbell */
733 writel(intinfo.delay,
734 &(host->cfgtable->HostWrite.CoalIntDelay));
735 writel(intinfo.count,
736 &(host->cfgtable->HostWrite.CoalIntCount));
737 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
738
739 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
740 if (!(readl(host->vaddr + SA5_DOORBELL)
741 & CFGTBL_ChangeReq))
742 break;
743 /* delay and try again */
744 udelay(1000);
745 }
746 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
747 if (i >= MAX_IOCTL_CONFIG_WAIT)
748 return -EAGAIN;
749 return 0;
750 }
751 case CCISS_GETNODENAME:
752 {
753 NodeName_type NodeName;
754 int i;
755
756 if (!arg)
757 return -EINVAL;
758 for (i = 0; i < 16; i++)
759 NodeName[i] =
760 readb(&host->cfgtable->ServerName[i]);
761 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
762 return -EFAULT;
763 return 0;
764 }
765 case CCISS_SETNODENAME:
766 {
767 NodeName_type NodeName;
768 unsigned long flags;
769 int i;
770
771 if (!arg)
772 return -EINVAL;
773 if (!capable(CAP_SYS_ADMIN))
774 return -EPERM;
775
776 if (copy_from_user
777 (NodeName, argp, sizeof(NodeName_type)))
778 return -EFAULT;
779
780 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
781
782 /* Update the field, and then ring the doorbell */
783 for (i = 0; i < 16; i++)
784 writeb(NodeName[i],
785 &host->cfgtable->ServerName[i]);
786
787 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
788
789 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
790 if (!(readl(host->vaddr + SA5_DOORBELL)
791 & CFGTBL_ChangeReq))
792 break;
793 /* delay and try again */
794 udelay(1000);
795 }
796 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
797 if (i >= MAX_IOCTL_CONFIG_WAIT)
798 return -EAGAIN;
799 return 0;
800 }
801
802 case CCISS_GETHEARTBEAT:
803 {
804 Heartbeat_type heartbeat;
805
806 if (!arg)
807 return -EINVAL;
808 heartbeat = readl(&host->cfgtable->HeartBeat);
809 if (copy_to_user
810 (argp, &heartbeat, sizeof(Heartbeat_type)))
811 return -EFAULT;
812 return 0;
813 }
814 case CCISS_GETBUSTYPES:
815 {
816 BusTypes_type BusTypes;
817
818 if (!arg)
819 return -EINVAL;
820 BusTypes = readl(&host->cfgtable->BusTypes);
821 if (copy_to_user
822 (argp, &BusTypes, sizeof(BusTypes_type)))
823 return -EFAULT;
824 return 0;
825 }
826 case CCISS_GETFIRMVER:
827 {
828 FirmwareVer_type firmware;
829
830 if (!arg)
831 return -EINVAL;
832 memcpy(firmware, host->firm_ver, 4);
833
834 if (copy_to_user
835 (argp, firmware, sizeof(FirmwareVer_type)))
836 return -EFAULT;
837 return 0;
838 }
839 case CCISS_GETDRIVVER:
840 {
841 DriverVer_type DriverVer = DRIVER_VERSION;
842
843 if (!arg)
844 return -EINVAL;
845
846 if (copy_to_user
847 (argp, &DriverVer, sizeof(DriverVer_type)))
848 return -EFAULT;
849 return 0;
850 }
851
852 case CCISS_REVALIDVOLS:
853 if (bdev != bdev->bd_contains || drv != host->drv)
854 return -ENXIO;
855 return revalidate_allvol(host);
856
857 case CCISS_GETLUNINFO:{
858 LogvolInfo_struct luninfo;
859
860 luninfo.LunID = drv->LunID;
861 luninfo.num_opens = drv->usage_count;
862 luninfo.num_parts = 0;
863 if (copy_to_user(argp, &luninfo,
864 sizeof(LogvolInfo_struct)))
865 return -EFAULT;
866 return 0;
867 }
868 case CCISS_DEREGDISK:
869 return rebuild_lun_table(host, disk);
870
871 case CCISS_REGNEWD:
872 return rebuild_lun_table(host, NULL);
873
874 case CCISS_PASSTHRU:
875 {
876 IOCTL_Command_struct iocommand;
877 CommandList_struct *c;
878 char *buff = NULL;
879 u64bit temp64;
880 unsigned long flags;
881 DECLARE_COMPLETION_ONSTACK(wait);
882
883 if (!arg)
884 return -EINVAL;
885
886 if (!capable(CAP_SYS_RAWIO))
887 return -EPERM;
888
889 if (copy_from_user
890 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
891 return -EFAULT;
892 if ((iocommand.buf_size < 1) &&
893 (iocommand.Request.Type.Direction != XFER_NONE)) {
894 return -EINVAL;
895 }
896 #if 0 /* 'buf_size' member is 16 bits, and always smaller than the kmalloc limit */
897 /* Check kmalloc limits */
898 if (iocommand.buf_size > 128000)
899 return -EINVAL;
900 #endif
901 if (iocommand.buf_size > 0) {
902 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
903 if (buff == NULL)
904 return -ENOMEM;
905 }
906 if (iocommand.Request.Type.Direction == XFER_WRITE) {
907 /* Copy the data into the buffer we created */
908 if (copy_from_user
909 (buff, iocommand.buf, iocommand.buf_size)) {
910 kfree(buff);
911 return -EFAULT;
912 }
913 } else {
914 memset(buff, 0, iocommand.buf_size);
915 }
916 if ((c = cmd_alloc(host, 0)) == NULL) {
917 kfree(buff);
918 return -ENOMEM;
919 }
920 // Fill in the command type
921 c->cmd_type = CMD_IOCTL_PEND;
922 // Fill in Command Header
923 c->Header.ReplyQueue = 0; // unused in simple mode
924 if (iocommand.buf_size > 0) // buffer to fill
925 {
926 c->Header.SGList = 1;
927 c->Header.SGTotal = 1;
928 } else // no buffers to fill
929 {
930 c->Header.SGList = 0;
931 c->Header.SGTotal = 0;
932 }
933 c->Header.LUN = iocommand.LUN_info;
934 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for the tag
935
936 // Fill in Request block
937 c->Request = iocommand.Request;
938
939 // Fill in the scatter gather information
940 if (iocommand.buf_size > 0) {
941 temp64.val = pci_map_single(host->pdev, buff,
942 iocommand.buf_size,
943 PCI_DMA_BIDIRECTIONAL);
944 c->SG[0].Addr.lower = temp64.val32.lower;
945 c->SG[0].Addr.upper = temp64.val32.upper;
946 c->SG[0].Len = iocommand.buf_size;
947 c->SG[0].Ext = 0; // we are not chaining
948 }
949 c->waiting = &wait;
950
951 /* Put the request on the tail of the request queue */
952 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
953 addQ(&host->reqQ, c);
954 host->Qdepth++;
955 start_io(host);
956 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
957
958 wait_for_completion(&wait);
959
960 /* unlock the buffers from DMA */
961 temp64.val32.lower = c->SG[0].Addr.lower;
962 temp64.val32.upper = c->SG[0].Addr.upper;
963 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
964 iocommand.buf_size,
965 PCI_DMA_BIDIRECTIONAL);
966
967 /* Copy the error information out */
968 iocommand.error_info = *(c->err_info);
969 if (copy_to_user
970 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
971 kfree(buff);
972 cmd_free(host, c, 0);
973 return -EFAULT;
974 }
975
976 if (iocommand.Request.Type.Direction == XFER_READ) {
977 /* Copy the data out of the buffer we created */
978 if (copy_to_user
979 (iocommand.buf, buff, iocommand.buf_size)) {
980 kfree(buff);
981 cmd_free(host, c, 0);
982 return -EFAULT;
983 }
984 }
985 kfree(buff);
986 cmd_free(host, c, 0);
987 return 0;
988 }
989 case CCISS_BIG_PASSTHRU:{
990 BIG_IOCTL_Command_struct *ioc;
991 CommandList_struct *c;
992 unsigned char **buff = NULL;
993 int *buff_size = NULL;
994 u64bit temp64;
995 unsigned long flags;
996 BYTE sg_used = 0;
997 int status = 0;
998 int i;
999 DECLARE_COMPLETION_ONSTACK(wait);
1000 __u32 left;
1001 __u32 sz;
1002 BYTE __user *data_ptr;
1003
1004 if (!arg)
1005 return -EINVAL;
1006 if (!capable(CAP_SYS_RAWIO))
1007 return -EPERM;
1008 ioc = (BIG_IOCTL_Command_struct *)
1009 kmalloc(sizeof(*ioc), GFP_KERNEL);
1010 if (!ioc) {
1011 status = -ENOMEM;
1012 goto cleanup1;
1013 }
1014 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1015 status = -EFAULT;
1016 goto cleanup1;
1017 }
1018 if ((ioc->buf_size < 1) &&
1019 (ioc->Request.Type.Direction != XFER_NONE)) {
1020 status = -EINVAL;
1021 goto cleanup1;
1022 }
1023 /* Check kmalloc limits using all SGs */
1024 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1025 status = -EINVAL;
1026 goto cleanup1;
1027 }
1028 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1029 status = -EINVAL;
1030 goto cleanup1;
1031 }
1032 buff =
1033 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1034 if (!buff) {
1035 status = -ENOMEM;
1036 goto cleanup1;
1037 }
1038 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1039 GFP_KERNEL);
1040 if (!buff_size) {
1041 status = -ENOMEM;
1042 goto cleanup1;
1043 }
1044 left = ioc->buf_size;
1045 data_ptr = ioc->buf;
1046 while (left) {
1047 sz = (left >
1048 ioc->malloc_size) ? ioc->
1049 malloc_size : left;
1050 buff_size[sg_used] = sz;
1051 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1052 if (buff[sg_used] == NULL) {
1053 status = -ENOMEM;
1054 goto cleanup1;
1055 }
1056 if (ioc->Request.Type.Direction == XFER_WRITE) {
1057 if (copy_from_user
1058 (buff[sg_used], data_ptr, sz)) {
1059 status = -EFAULT;
1060 goto cleanup1;
1061 }
1062 } else {
1063 memset(buff[sg_used], 0, sz);
1064 }
1065 left -= sz;
1066 data_ptr += sz;
1067 sg_used++;
1068 }
1069 if ((c = cmd_alloc(host, 0)) == NULL) {
1070 status = -ENOMEM;
1071 goto cleanup1;
1072 }
1073 c->cmd_type = CMD_IOCTL_PEND;
1074 c->Header.ReplyQueue = 0;
1075
1076 if (ioc->buf_size > 0) {
1077 c->Header.SGList = sg_used;
1078 c->Header.SGTotal = sg_used;
1079 } else {
1080 c->Header.SGList = 0;
1081 c->Header.SGTotal = 0;
1082 }
1083 c->Header.LUN = ioc->LUN_info;
1084 c->Header.Tag.lower = c->busaddr;
1085
1086 c->Request = ioc->Request;
1087 if (ioc->buf_size > 0) {
1088 int i;
1089 for (i = 0; i < sg_used; i++) {
1090 temp64.val =
1091 pci_map_single(host->pdev, buff[i],
1092 buff_size[i],
1093 PCI_DMA_BIDIRECTIONAL);
1094 c->SG[i].Addr.lower =
1095 temp64.val32.lower;
1096 c->SG[i].Addr.upper =
1097 temp64.val32.upper;
1098 c->SG[i].Len = buff_size[i];
1099 c->SG[i].Ext = 0; /* we are not chaining */
1100 }
1101 }
1102 c->waiting = &wait;
1103 /* Put the request on the tail of the request queue */
1104 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1105 addQ(&host->reqQ, c);
1106 host->Qdepth++;
1107 start_io(host);
1108 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1109 wait_for_completion(&wait);
1110 /* unlock the buffers from DMA */
1111 for (i = 0; i < sg_used; i++) {
1112 temp64.val32.lower = c->SG[i].Addr.lower;
1113 temp64.val32.upper = c->SG[i].Addr.upper;
1114 pci_unmap_single(host->pdev,
1115 (dma_addr_t) temp64.val, buff_size[i],
1116 PCI_DMA_BIDIRECTIONAL);
1117 }
1118 /* Copy the error information out */
1119 ioc->error_info = *(c->err_info);
1120 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1121 cmd_free(host, c, 0);
1122 status = -EFAULT;
1123 goto cleanup1;
1124 }
1125 if (ioc->Request.Type.Direction == XFER_READ) {
1126 /* Copy the data out of the buffer we created */
1127 BYTE __user *ptr = ioc->buf;
1128 for (i = 0; i < sg_used; i++) {
1129 if (copy_to_user
1130 (ptr, buff[i], buff_size[i])) {
1131 cmd_free(host, c, 0);
1132 status = -EFAULT;
1133 goto cleanup1;
1134 }
1135 ptr += buff_size[i];
1136 }
1137 }
1138 cmd_free(host, c, 0);
1139 status = 0;
1140 cleanup1:
1141 if (buff) {
1142 for (i = 0; i < sg_used; i++)
1143 kfree(buff[i]);
1144 kfree(buff);
1145 }
1146 kfree(buff_size);
1147 kfree(ioc);
1148 return status;
1149 }
1150 default:
1151 return -ENOTTY;
1152 }
1153 }
1154
1155 /*
1156 * revalidate_allvol is for online array config utilities. After a
1157 * utility reconfigures the drives in the array, it can use this function
1158 * (through an ioctl) to make the driver zap any previous disk structs for
1159 * that controller and get new ones.
1160 *
1161 * Right now I'm using the getgeometry() function to do this, but this
1162 * function should probably be finer grained and allow you to revalidate one
1163 * particular logical volume (instead of all of them on a particular
1164 * controller).
1165 */
1166 static int revalidate_allvol(ctlr_info_t *host)
1167 {
1168 int ctlr = host->ctlr, i;
1169 unsigned long flags;
1170
1171 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1172 if (host->usage_count > 1) {
1173 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1174 printk(KERN_WARNING "cciss: Device busy for volume"
1175 " revalidation (usage=%d)\n", host->usage_count);
1176 return -EBUSY;
1177 }
1178 host->usage_count++;
1179 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1180
1181 for (i = 0; i < NWD; i++) {
1182 struct gendisk *disk = host->gendisk[i];
1183 if (disk) {
1184 request_queue_t *q = disk->queue;
1185
1186 if (disk->flags & GENHD_FL_UP)
1187 del_gendisk(disk);
1188 if (q)
1189 blk_cleanup_queue(q);
1190 }
1191 }
1192
1193 /*
1194 * Set the partition and block size structures for all volumes
1195 * on this controller to zero. We will reread all of this data
1196 */
1197 memset(host->drv, 0, sizeof(drive_info_struct)
1198 * CISS_MAX_LUN);
1199 /*
1200 * Tell the array controller not to give us any interrupts while
1201 * we check the new geometry. Then turn interrupts back on when
1202 * we're done.
1203 */
1204 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1205 cciss_getgeometry(ctlr);
1206 host->access.set_intr_mask(host, CCISS_INTR_ON);
1207
1208 /* Loop through each real device */
1209 for (i = 0; i < NWD; i++) {
1210 struct gendisk *disk = host->gendisk[i];
1211 drive_info_struct *drv = &(host->drv[i]);
1212 /* we must register the controller even if no disks exist */
1213 /* this is for the online array utilities */
1214 if (!drv->heads && i)
1215 continue;
1216 blk_queue_hardsect_size(drv->queue, drv->block_size);
1217 set_capacity(disk, drv->nr_blocks);
1218 add_disk(disk);
1219 }
1220 host->usage_count--;
1221 return 0;
1222 }
1223
1224 static inline void complete_buffers(struct bio *bio, int status)
1225 {
1226 while (bio) {
1227 struct bio *xbh = bio->bi_next;
1228 int nr_sectors = bio_sectors(bio);
1229
1230 bio->bi_next = NULL;
1231 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1232 bio = xbh;
1233 }
1234 }
1235
1236 static void cciss_check_queues(ctlr_info_t *h)
1237 {
1238 int start_queue = h->next_to_run;
1239 int i;
1240
1241 /* check to see if we have maxed out the number of commands that can
1242 * be placed on the queue. If so then exit. We do this check here
1243 * in case the interrupt we serviced was from an ioctl and did not
1244 * free any new commands.
1245 */
1246 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1247 return;
1248
1249 /* We have room on the queue for more commands. Now we need to queue
1250 * them up. We will also keep track of the next queue to run so
1251 * that every queue gets a chance to be started first.
1252 */
1253 for (i = 0; i < h->highest_lun + 1; i++) {
1254 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1255 /* make sure the disk has been added and the drive is real
1256 * because this can be called from the middle of init_one.
1257 */
1258 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1259 continue;
1260 blk_start_queue(h->gendisk[curr_queue]->queue);
1261
1262 /* check to see if we have maxed out the number of commands
1263 * that can be placed on the queue.
1264 */
1265 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1266 if (curr_queue == start_queue) {
1267 h->next_to_run =
1268 (start_queue + 1) % (h->highest_lun + 1);
1269 break;
1270 } else {
1271 h->next_to_run = curr_queue;
1272 break;
1273 }
1274 } else {
1275 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1276 }
1277 }
1278 }
1279
1280 static void cciss_softirq_done(struct request *rq)
1281 {
1282 CommandList_struct *cmd = rq->completion_data;
1283 ctlr_info_t *h = hba[cmd->ctlr];
1284 unsigned long flags;
1285 u64bit temp64;
1286 int i, ddir;
1287
1288 if (cmd->Request.Type.Direction == XFER_READ)
1289 ddir = PCI_DMA_FROMDEVICE;
1290 else
1291 ddir = PCI_DMA_TODEVICE;
1292
1293 /* command did not need to be retried */
1294 /* unmap the DMA mapping for all the scatter gather elements */
1295 for (i = 0; i < cmd->Header.SGList; i++) {
1296 temp64.val32.lower = cmd->SG[i].Addr.lower;
1297 temp64.val32.upper = cmd->SG[i].Addr.upper;
1298 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1299 }
1300
1301 complete_buffers(rq->bio, rq->errors);
1302
1303 if (blk_fs_request(rq)) {
1304 const int rw = rq_data_dir(rq);
1305
1306 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1307 }
1308
1309 #ifdef CCISS_DEBUG
1310 printk("Done with %p\n", rq);
1311 #endif /* CCISS_DEBUG */
1312
1313 add_disk_randomness(rq->rq_disk);
1314 spin_lock_irqsave(&h->lock, flags);
1315 end_that_request_last(rq, rq->errors);
1316 cmd_free(h, cmd, 1);
1317 cciss_check_queues(h);
1318 spin_unlock_irqrestore(&h->lock, flags);
1319 }
1320
1321 /* This function will check the usage_count of the drive to be updated/added.
1322 * If the usage_count is zero then the drive information will be updated and
1323 * the disk will be re-registered with the kernel. If not then it will be
1324 * left alone for the next reboot. The exception to this is disk 0 which
1325 * will always be left registered with the kernel since it is also the
1326 * controller node. Any changes to disk 0 will show up on the next
1327 * reboot.
1328 */
1329 static void cciss_update_drive_info(int ctlr, int drv_index)
1330 {
1331 ctlr_info_t *h = hba[ctlr];
1332 struct gendisk *disk;
1333 InquiryData_struct *inq_buff = NULL;
1334 unsigned int block_size;
1335 sector_t total_size;
1336 unsigned long flags = 0;
1337 int ret = 0;
1338
1339 /* if the disk already exists then deregister it before proceeding */
1340 if (h->drv[drv_index].raid_level != -1) {
1341 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1342 h->drv[drv_index].busy_configuring = 1;
1343 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1344 ret = deregister_disk(h->gendisk[drv_index],
1345 &h->drv[drv_index], 0);
1346 h->drv[drv_index].busy_configuring = 0;
1347 }
1348
1349 /* If the disk is in use return */
1350 if (ret)
1351 return;
1352
1353 /* Get information about the disk and modify the driver structure */
1354 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1355 if (inq_buff == NULL)
1356 goto mem_msg;
1357
1358 cciss_read_capacity(ctlr, drv_index, 1,
1359 &total_size, &block_size);
1360
1361 /* total size = last LBA + 1 */
1362 /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
1363 /* so we assume this volume must be >2TB in size */
1364 if (total_size == (__u32) 0) {
1365 cciss_read_capacity_16(ctlr, drv_index, 1,
1366 &total_size, &block_size);
1367 h->cciss_read = CCISS_READ_16;
1368 h->cciss_write = CCISS_WRITE_16;
1369 } else {
1370 h->cciss_read = CCISS_READ_10;
1371 h->cciss_write = CCISS_WRITE_10;
1372 }
1373 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1374 inq_buff, &h->drv[drv_index]);
1375
1376 ++h->num_luns;
1377 disk = h->gendisk[drv_index];
1378 set_capacity(disk, h->drv[drv_index].nr_blocks);
1379
1380 /* if it's the controller it's already added */
1381 if (drv_index) {
1382 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1383
1384 /* Set up queue information */
1385 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1386 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1387
1388 /* This is a hardware imposed limit. */
1389 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1390
1391 /* This is a limit in the driver and could be eliminated. */
1392 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1393
1394 blk_queue_max_sectors(disk->queue, 512);
1395
1396 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1397
1398 disk->queue->queuedata = hba[ctlr];
1399
1400 blk_queue_hardsect_size(disk->queue,
1401 hba[ctlr]->drv[drv_index].block_size);
1402
1403 h->drv[drv_index].queue = disk->queue;
1404 add_disk(disk);
1405 }
1406
1407 freeret:
1408 kfree(inq_buff);
1409 return;
1410 mem_msg:
1411 printk(KERN_ERR "cciss: out of memory\n");
1412 goto freeret;
1413 }
1414
1415 /* This function will find the first index of the controller's drive array
1416 * that has a -1 for the raid_level and will return that index. This is
1417 * where new drives will be added. If the index to be returned is greater
1418 * than the highest_lun index for the controller then highest_lun is set
1419 * to this new index. If there are no available indexes then -1 is returned.
1420 */
1421 static int cciss_find_free_drive_index(int ctlr)
1422 {
1423 int i;
1424
1425 for (i = 0; i < CISS_MAX_LUN; i++) {
1426 if (hba[ctlr]->drv[i].raid_level == -1) {
1427 if (i > hba[ctlr]->highest_lun)
1428 hba[ctlr]->highest_lun = i;
1429 return i;
1430 }
1431 }
1432 return -1;
1433 }
1434
1435 /* This function will add and remove logical drives from the Logical
1436 * drive array of the controller and maintain persistency of ordering
1437 * so that mount points are preserved until the next reboot. This allows
1438 * for the removal of logical drives in the middle of the drive array
1439 * without a re-ordering of those drives.
1440 * INPUT
1441 * h = The controller to perform the operations on
1442 * del_disk = The disk to remove if specified. If the value given
1443 * is NULL then no disk is removed.
1444 */
1445 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1446 {
1447 int ctlr = h->ctlr;
1448 int num_luns;
1449 ReportLunData_struct *ld_buff = NULL;
1450 drive_info_struct *drv = NULL;
1451 int return_code;
1452 int listlength = 0;
1453 int i;
1454 int drv_found;
1455 int drv_index = 0;
1456 __u32 lunid = 0;
1457 unsigned long flags;
1458
1459 /* Set busy_configuring flag for this operation */
1460 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1461 if (h->num_luns >= CISS_MAX_LUN) {
1462 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1463 return -EINVAL;
1464 }
1465
1466 if (h->busy_configuring) {
1467 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1468 return -EBUSY;
1469 }
1470 h->busy_configuring = 1;
1471
1472 /* if del_disk is NULL then we are being called to add a new disk
1473 * and update the logical drive table. If it is not NULL then
1474 * we will check if the disk is in use or not.
1475 */
1476 if (del_disk != NULL) {
1477 drv = get_drv(del_disk);
1478 drv->busy_configuring = 1;
1479 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1480 return_code = deregister_disk(del_disk, drv, 1);
1481 drv->busy_configuring = 0;
1482 h->busy_configuring = 0;
1483 return return_code;
1484 } else {
1485 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1486 if (!capable(CAP_SYS_RAWIO))
1487 return -EPERM;
1488
1489 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1490 if (ld_buff == NULL)
1491 goto mem_msg;
1492
1493 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1494 sizeof(ReportLunData_struct), 0,
1495 0, 0, TYPE_CMD);
1496
1497 if (return_code == IO_OK) {
1498 listlength |=
1499 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1500 << 24;
1501 listlength |=
1502 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1503 << 16;
1504 listlength |=
1505 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1506 << 8;
1507 listlength |=
1508 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1509 } else { /* reading number of logical volumes failed */
1510 printk(KERN_WARNING "cciss: report logical volume"
1511 " command failed\n");
1512 listlength = 0;
1513 goto freeret;
1514 }
1515
1516 num_luns = listlength / 8; /* 8 bytes per entry */
1517 if (num_luns > CISS_MAX_LUN) {
1518 num_luns = CISS_MAX_LUN;
1519 printk(KERN_WARNING "cciss: more luns configured"
1520 " on controller than can be handled by"
1521 " this driver.\n");
1522 }
1523
1524 /* Compare the controller's drive array to the driver's drive array.
1525 * Check for updates in the drive information and any new drives
1526 * on the controller.
1527 */
1528 for (i = 0; i < num_luns; i++) {
1529 int j;
1530
1531 drv_found = 0;
1532
1533 lunid = (0xff &
1534 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1535 lunid |= (0xff &
1536 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1537 lunid |= (0xff &
1538 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1539 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1540
1541 /* Find if the LUN is already in the drive array
1542 * of the controller. If so, update its info
1543 * if it is not in use. If it does not exist, find
1544 * the first free index and add it.
1545 */
1546 for (j = 0; j <= h->highest_lun; j++) {
1547 if (h->drv[j].LunID == lunid) {
1548 drv_index = j;
1549 drv_found = 1;
1550 }
1551 }
1552
1553 /* check if the drive was found already in the array */
1554 if (!drv_found) {
1555 drv_index = cciss_find_free_drive_index(ctlr);
1556 if (drv_index == -1)
1557 goto freeret;
1558
1559 }
1560 h->drv[drv_index].LunID = lunid;
1561 cciss_update_drive_info(ctlr, drv_index);
1562 } /* end for */
1563 } /* end else */
1564
1565 freeret:
1566 kfree(ld_buff);
1567 h->busy_configuring = 0;
1568 /* We return -1 here to tell the ACU that we have registered/updated
1569 * all of the drives that we can and to keep it from calling us
1570 * additional times.
1571 */
1572 return -1;
1573 mem_msg:
1574 printk(KERN_ERR "cciss: out of memory\n");
1575 goto freeret;
1576 }
1577
1578 /* This function will deregister the disk and its queue from the
1579 * kernel. It must be called with the controller lock held and the
1580 * drv structure's busy_configuring flag set. Its parameters are:
1581 *
1582 * disk = This is the disk to be deregistered
1583 * drv = This is the drive_info_struct associated with the disk to be
1584 * deregistered. It contains information about the disk used
1585 * by the driver.
1586 * clear_all = This flag determines whether or not the disk information
1587 * is going to be completely cleared out and the highest_lun
1588 * reset. Sometimes we want to clear out information about
1589 * the disk in preparation for re-adding it. In this case
1590 * the highest_lun should be left unchanged and the LunID
1591 * should not be cleared.
1592 */
1593 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1594 int clear_all)
1595 {
1596 ctlr_info_t *h = get_host(disk);
1597
1598 if (!capable(CAP_SYS_RAWIO))
1599 return -EPERM;
1600
1601 /* make sure the logical volume is NOT in use */
1602 if (clear_all || (h->gendisk[0] == disk)) {
1603 if (drv->usage_count > 1)
1604 return -EBUSY;
1605 } else if (drv->usage_count > 0)
1606 return -EBUSY;
1607
1608 /* invalidate the devices and deregister the disk. If it is disk
1609 * zero, do not deregister it but just zero out its values. This
1610 * allows us to delete disk zero but keep the controller registered.
1611 */
1612 if (h->gendisk[0] != disk) {
1613 if (disk) {
1614 request_queue_t *q = disk->queue;
1615 if (disk->flags & GENHD_FL_UP)
1616 del_gendisk(disk);
1617 if (q) {
1618 blk_cleanup_queue(q);
1619 drv->queue = NULL;
1620 }
1621 }
1622 }
1623
1624 --h->num_luns;
1625 /* zero out the disk size info */
1626 drv->nr_blocks = 0;
1627 drv->block_size = 0;
1628 drv->heads = 0;
1629 drv->sectors = 0;
1630 drv->cylinders = 0;
1631 drv->raid_level = -1; /* This can be used as a flag variable to
1632 * indicate that this element of the drive
1633 * array is free.
1634 */
1635
1636 if (clear_all) {
1637 /* check to see if it was the last disk */
1638 if (drv == h->drv + h->highest_lun) {
1639 /* if so, find the new highest lun */
1640 int i, newhighest = -1;
1641 for (i = 0; i < h->highest_lun; i++) {
1642 /* if the disk has size > 0, it is available */
1643 if (h->drv[i].heads)
1644 newhighest = i;
1645 }
1646 h->highest_lun = newhighest;
1647 }
1648
1649 drv->LunID = 0;
1650 }
1651 return 0;
1652 }
1653
1654 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1655 1: address logical volume log_unit,
1656 2: periph device address is scsi3addr */
1657 unsigned int log_unit, __u8 page_code,
1658 unsigned char *scsi3addr, int cmd_type)
1659 {
1660 ctlr_info_t *h = hba[ctlr];
1661 u64bit buff_dma_handle;
1662 int status = IO_OK;
1663
1664 c->cmd_type = CMD_IOCTL_PEND;
1665 c->Header.ReplyQueue = 0;
1666 if (buff != NULL) {
1667 c->Header.SGList = 1;
1668 c->Header.SGTotal = 1;
1669 } else {
1670 c->Header.SGList = 0;
1671 c->Header.SGTotal = 0;
1672 }
1673 c->Header.Tag.lower = c->busaddr;
1674
1675 c->Request.Type.Type = cmd_type;
1676 if (cmd_type == TYPE_CMD) {
1677 switch (cmd) {
1678 case CISS_INQUIRY:
1679 /* If use_unit_num is 0, this is addressed to the
1680 controller, so it's a physical command:
1681 mode = 0, target = 0, and there is nothing to write.
1682 Otherwise, if use_unit_num == 1,
1683 mode = 1 (volume set addressing), target = LUNID;
1684 if use_unit_num == 2,
1685 mode = 0 (periph dev addr), target = scsi3addr */
1686 if (use_unit_num == 1) {
1687 c->Header.LUN.LogDev.VolId =
1688 h->drv[log_unit].LunID;
1689 c->Header.LUN.LogDev.Mode = 1;
1690 } else if (use_unit_num == 2) {
1691 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1692 8);
1693 c->Header.LUN.LogDev.Mode = 0;
1694 }
1695 /* are we trying to read a vital product page */
1696 if (page_code != 0) {
1697 c->Request.CDB[1] = 0x01;
1698 c->Request.CDB[2] = page_code;
1699 }
1700 c->Request.CDBLen = 6;
1701 c->Request.Type.Attribute = ATTR_SIMPLE;
1702 c->Request.Type.Direction = XFER_READ;
1703 c->Request.Timeout = 0;
1704 c->Request.CDB[0] = CISS_INQUIRY;
1705 c->Request.CDB[4] = size & 0xFF;
1706 break;
1707 case CISS_REPORT_LOG:
1708 case CISS_REPORT_PHYS:
1709 /* Talking to the controller, so it's a physical command
1710 mode = 00 target = 0. Nothing to write.
1711 */
1712 c->Request.CDBLen = 12;
1713 c->Request.Type.Attribute = ATTR_SIMPLE;
1714 c->Request.Type.Direction = XFER_READ;
1715 c->Request.Timeout = 0;
1716 c->Request.CDB[0] = cmd;
1717 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1718 c->Request.CDB[7] = (size >> 16) & 0xFF;
1719 c->Request.CDB[8] = (size >> 8) & 0xFF;
1720 c->Request.CDB[9] = size & 0xFF;
1721 break;
1722
1723 case CCISS_READ_CAPACITY:
1724 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1725 c->Header.LUN.LogDev.Mode = 1;
1726 c->Request.CDBLen = 10;
1727 c->Request.Type.Attribute = ATTR_SIMPLE;
1728 c->Request.Type.Direction = XFER_READ;
1729 c->Request.Timeout = 0;
1730 c->Request.CDB[0] = cmd;
1731 break;
1732 case CCISS_READ_CAPACITY_16:
1733 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1734 c->Header.LUN.LogDev.Mode = 1;
1735 c->Request.CDBLen = 16;
1736 c->Request.Type.Attribute = ATTR_SIMPLE;
1737 c->Request.Type.Direction = XFER_READ;
1738 c->Request.Timeout = 0;
1739 c->Request.CDB[0] = cmd;
1740 c->Request.CDB[1] = 0x10;
1741 c->Request.CDB[10] = (size >> 24) & 0xFF;
1742 c->Request.CDB[11] = (size >> 16) & 0xFF;
1743 c->Request.CDB[12] = (size >> 8) & 0xFF;
1744 c->Request.CDB[13] = size & 0xFF;
1745 c->Request.Timeout = 0;
1746 c->Request.CDB[0] = cmd;
1747 break;
1748 case CCISS_CACHE_FLUSH:
1749 c->Request.CDBLen = 12;
1750 c->Request.Type.Attribute = ATTR_SIMPLE;
1751 c->Request.Type.Direction = XFER_WRITE;
1752 c->Request.Timeout = 0;
1753 c->Request.CDB[0] = BMIC_WRITE;
1754 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1755 break;
1756 default:
1757 printk(KERN_WARNING
1758 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1759 return IO_ERROR;
1760 }
1761 } else if (cmd_type == TYPE_MSG) {
1762 switch (cmd) {
1763 case 0: /* ABORT message */
1764 c->Request.CDBLen = 12;
1765 c->Request.Type.Attribute = ATTR_SIMPLE;
1766 c->Request.Type.Direction = XFER_WRITE;
1767 c->Request.Timeout = 0;
1768 c->Request.CDB[0] = cmd; /* abort */
1769 c->Request.CDB[1] = 0; /* abort a command */
1770 /* buff contains the tag of the command to abort */
1771 memcpy(&c->Request.CDB[4], buff, 8);
1772 break;
1773 case 1: /* RESET message */
1774 c->Request.CDBLen = 12;
1775 c->Request.Type.Attribute = ATTR_SIMPLE;
1776 c->Request.Type.Direction = XFER_WRITE;
1777 c->Request.Timeout = 0;
1778 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1779 c->Request.CDB[0] = cmd; /* reset */
1780 c->Request.CDB[1] = 0x04; /* reset a LUN */
1781 break;
1782 case 3: /* No-Op message */
1783 c->Request.CDBLen = 1;
1784 c->Request.Type.Attribute = ATTR_SIMPLE;
1785 c->Request.Type.Direction = XFER_WRITE;
1786 c->Request.Timeout = 0;
1787 c->Request.CDB[0] = cmd;
1788 break;
1789 default:
1790 printk(KERN_WARNING
1791 "cciss%d: unknown message type %d\n", ctlr, cmd);
1792 return IO_ERROR;
1793 }
1794 } else {
1795 printk(KERN_WARNING
1796 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1797 return IO_ERROR;
1798 }
1799 /* Fill in the scatter gather information */
1800 if (size > 0) {
1801 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1802 buff, size,
1803 PCI_DMA_BIDIRECTIONAL);
1804 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1805 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1806 c->SG[0].Len = size;
1807 c->SG[0].Ext = 0; /* we are not chaining */
1808 }
1809 return status;
1810 }
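/*
 * Example invocation (taken from the callers later in this file): to inquire
 * about one logical volume, sendcmd_withirq() ends up calling
 *
 *	fill_cmd(c, CISS_INQUIRY, ctlr, inq_buff, sizeof(*inq_buff),
 *		 1, logvol, 0xC1, NULL, TYPE_CMD);
 *
 * i.e. use_unit_num == 1 (volume set addressing) with page_code 0xC1, the
 * inquiry page cciss_geometry_inquiry() parses for heads/sectors/cylinders.
 */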
1811
1812 static int sendcmd_withirq(__u8 cmd,
1813 int ctlr,
1814 void *buff,
1815 size_t size,
1816 unsigned int use_unit_num,
1817 unsigned int log_unit, __u8 page_code, int cmd_type)
1818 {
1819 ctlr_info_t *h = hba[ctlr];
1820 CommandList_struct *c;
1821 u64bit buff_dma_handle;
1822 unsigned long flags;
1823 int return_status;
1824 DECLARE_COMPLETION_ONSTACK(wait);
1825
1826 if ((c = cmd_alloc(h, 0)) == NULL)
1827 return -ENOMEM;
1828 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1829 log_unit, page_code, NULL, cmd_type);
1830 if (return_status != IO_OK) {
1831 cmd_free(h, c, 0);
1832 return return_status;
1833 }
1834 resend_cmd2:
1835 c->waiting = &wait;
1836
1837 /* Put the request on the tail of the queue and send it */
1838 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1839 addQ(&h->reqQ, c);
1840 h->Qdepth++;
1841 start_io(h);
1842 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1843
1844 wait_for_completion(&wait);
1845
1846 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1847 switch (c->err_info->CommandStatus) {
1848 case CMD_TARGET_STATUS:
1849 printk(KERN_WARNING "cciss: cmd %p has "
1850 " completed with errors\n", c);
1851 if (c->err_info->ScsiStatus) {
1852 printk(KERN_WARNING "cciss: cmd %p "
1853 "has SCSI Status = %x\n",
1854 c, c->err_info->ScsiStatus);
1855 }
1856
1857 break;
1858 case CMD_DATA_UNDERRUN:
1859 case CMD_DATA_OVERRUN:
1860 /* expected for inquiry and report LUN commands */
1861 break;
1862 case CMD_INVALID:
1863 printk(KERN_WARNING "cciss: Cmd %p is "
1864 "reported invalid\n", c);
1865 return_status = IO_ERROR;
1866 break;
1867 case CMD_PROTOCOL_ERR:
1868 printk(KERN_WARNING "cciss: cmd %p has "
1869 "protocol error \n", c);
1870 return_status = IO_ERROR;
1871 break;
1872 case CMD_HARDWARE_ERR:
1873 printk(KERN_WARNING "cciss: cmd %p had "
1874 " hardware error\n", c);
1875 return_status = IO_ERROR;
1876 break;
1877 case CMD_CONNECTION_LOST:
1878 printk(KERN_WARNING "cciss: cmd %p had "
1879 "connection lost\n", c);
1880 return_status = IO_ERROR;
1881 break;
1882 case CMD_ABORTED:
1883 printk(KERN_WARNING "cciss: cmd %p was "
1884 "aborted\n", c);
1885 return_status = IO_ERROR;
1886 break;
1887 case CMD_ABORT_FAILED:
1888 printk(KERN_WARNING "cciss: cmd %p reports "
1889 "abort failed\n", c);
1890 return_status = IO_ERROR;
1891 break;
1892 case CMD_UNSOLICITED_ABORT:
1893 printk(KERN_WARNING
1894 "cciss%d: unsolicited abort %p\n", ctlr, c);
1895 if (c->retry_count < MAX_CMD_RETRIES) {
1896 printk(KERN_WARNING
1897 "cciss%d: retrying %p\n", ctlr, c);
1898 c->retry_count++;
1899 /* erase the old error information */
1900 memset(c->err_info, 0,
1901 sizeof(ErrorInfo_struct));
1902 return_status = IO_OK;
1903 INIT_COMPLETION(wait);
1904 goto resend_cmd2;
1905 }
1906 return_status = IO_ERROR;
1907 break;
1908 default:
1909 printk(KERN_WARNING "cciss: cmd %p returned "
1910 "unknown status %x\n", c,
1911 c->err_info->CommandStatus);
1912 return_status = IO_ERROR;
1913 }
1914 }
1915 /* unlock the buffers from DMA */
1916 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1917 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1918 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1919 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1920 cmd_free(h, c, 0);
1921 return return_status;
1922 }
1923
1924 static void cciss_geometry_inquiry(int ctlr, int logvol,
1925 int withirq, sector_t total_size,
1926 unsigned int block_size,
1927 InquiryData_struct *inq_buff,
1928 drive_info_struct *drv)
1929 {
1930 int return_code;
1931 unsigned long t;
1932
1933 memset(inq_buff, 0, sizeof(InquiryData_struct));
1934 if (withirq)
1935 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1936 inq_buff, sizeof(*inq_buff), 1,
1937 logvol, 0xC1, TYPE_CMD);
1938 else
1939 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1940 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1941 TYPE_CMD);
1942 if (return_code == IO_OK) {
1943 if (inq_buff->data_byte[8] == 0xFF) {
1944 printk(KERN_WARNING
1945 "cciss: reading geometry failed, volume "
1946 "does not support reading geometry\n");
1947 drv->heads = 255;
1948 drv->sectors = 32; // Sectors per track
1949 } else {
1950 drv->heads = inq_buff->data_byte[6];
1951 drv->sectors = inq_buff->data_byte[7];
1952 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1953 drv->cylinders += inq_buff->data_byte[5];
1954 drv->raid_level = inq_buff->data_byte[8];
1955 }
1956 drv->block_size = block_size;
1957 drv->nr_blocks = total_size;
1958 t = drv->heads * drv->sectors;
1959 if (t > 1) {
1960 unsigned rem = sector_div(total_size, t);
1961 if (rem)
1962 total_size++;
1963 drv->cylinders = total_size;
1964 }
1965 } else { /* Get geometry failed */
1966 printk(KERN_WARNING "cciss: reading geometry failed\n");
1967 }
1968 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1969 drv->heads, drv->sectors, drv->cylinders);
1970 }
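/*
 * Editor's note: the geometry code above derives a cylinder count as
 * ceil(nr_blocks / (heads * sectors_per_track)) via sector_div(). The
 * fragment below is an illustrative sketch of the same rounding in plain
 * integer arithmetic; it is not part of the driver and the helper name is
 * hypothetical.
 */
#if 0
static unsigned long long example_cylinders(unsigned long long nr_blocks,
					     unsigned int heads,
					     unsigned int sectors_per_track)
{
	unsigned long long t = (unsigned long long)heads * sectors_per_track;

	if (t <= 1)
		return nr_blocks;	/* degenerate geometry, nothing to divide by */
	/* e.g. 1000000 blocks with 255 heads and 32 sectors: t = 8160,
	 * 1000000 / 8160 = 122 remainder 4480, so round up to 123 cylinders */
	return (nr_blocks + t - 1) / t;
}
#endif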
1971
1972 static void
1973 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1974 unsigned int *block_size)
1975 {
1976 ReadCapdata_struct *buf;
1977 int return_code;
1978 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1979 if (buf == NULL) {
1980 printk(KERN_WARNING "cciss: out of memory\n");
1981 return;
1982 }
1983 memset(buf, 0, sizeof(ReadCapdata_struct));
1984 if (withirq)
1985 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1986 ctlr, buf, sizeof(ReadCapdata_struct),
1987 1, logvol, 0, TYPE_CMD);
1988 else
1989 return_code = sendcmd(CCISS_READ_CAPACITY,
1990 ctlr, buf, sizeof(ReadCapdata_struct),
1991 1, logvol, 0, NULL, TYPE_CMD);
1992 if (return_code == IO_OK) {
1993 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
1994 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1995 } else { /* read capacity command failed */
1996 printk(KERN_WARNING "cciss: read capacity failed\n");
1997 *total_size = 0;
1998 *block_size = BLOCK_SIZE;
1999 }
2000 if (*total_size != (__u32) 0)
2001 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2002 (unsigned long long)*total_size, *block_size);
2003 kfree(buf);
2004 return;
2005 }
2006
2007 static void
2008 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2009 {
2010 ReadCapdata_struct_16 *buf;
2011 int return_code;
2012 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2013 if (buf == NULL) {
2014 printk(KERN_WARNING "cciss: out of memory\n");
2015 return;
2016 }
2017 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2018 if (withirq) {
2019 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2020 ctlr, buf, sizeof(ReadCapdata_struct_16),
2021 1, logvol, 0, TYPE_CMD);
2022 }
2023 else {
2024 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2025 ctlr, buf, sizeof(ReadCapdata_struct_16),
2026 1, logvol, 0, NULL, TYPE_CMD);
2027 }
2028 if (return_code == IO_OK) {
2029 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
2030 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
2031 } else { /* read capacity command failed */
2032 printk(KERN_WARNING "cciss: read capacity failed\n");
2033 *total_size = 0;
2034 *block_size = BLOCK_SIZE;
2035 }
2036 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2037 (unsigned long long)*total_size, *block_size);
2038 kfree(buf);
2039 return;
2040 }
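/*
 * Editor's note: both read-capacity paths above return the *last* LBA in
 * big-endian byte order, so the usable block count is the decoded value
 * plus one. The sketch below decodes a 4-byte example by hand; it is
 * illustrative only and not part of the driver.
 */
#if 0
static unsigned long long example_block_count(void)
{
	unsigned char raw[4] = { 0x00, 0x00, 0x10, 0x00 };	/* big-endian last LBA 0x1000 */
	unsigned long long last_lba = ((unsigned long long)raw[0] << 24) |
				      (raw[1] << 16) | (raw[2] << 8) | raw[3];

	return last_lba + 1;	/* 0x1000 + 1 = 4097 blocks */
}
#endif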
2041
2042 static int cciss_revalidate(struct gendisk *disk)
2043 {
2044 ctlr_info_t *h = get_host(disk);
2045 drive_info_struct *drv = get_drv(disk);
2046 int logvol;
2047 int FOUND = 0;
2048 unsigned int block_size;
2049 sector_t total_size;
2050 InquiryData_struct *inq_buff = NULL;
2051
2052 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2053 if (h->drv[logvol].LunID == drv->LunID) {
2054 FOUND = 1;
2055 break;
2056 }
2057 }
2058
2059 if (!FOUND)
2060 return 1;
2061
2062 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2063 if (inq_buff == NULL) {
2064 printk(KERN_WARNING "cciss: out of memory\n");
2065 return 1;
2066 }
2067 if (h->cciss_read == CCISS_READ_10) {
2068 cciss_read_capacity(h->ctlr, logvol, 1,
2069 &total_size, &block_size);
2070 } else {
2071 cciss_read_capacity_16(h->ctlr, logvol, 1,
2072 &total_size, &block_size);
2073 }
2074 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2075 inq_buff, drv);
2076
2077 blk_queue_hardsect_size(drv->queue, drv->block_size);
2078 set_capacity(disk, drv->nr_blocks);
2079
2080 kfree(inq_buff);
2081 return 0;
2082 }
2083
2084 /*
2085 * Poll, waiting for a command to complete.
2086 * The memory-mapped FIFO is polled for the completion.
2087 * Used only at init time; interrupts from the HBA are disabled.
2088 */
2089 static unsigned long pollcomplete(int ctlr)
2090 {
2091 unsigned long done;
2092 int i;
2093
2094 /* Wait (up to 20 seconds) for a command to complete */
2095
2096 for (i = 20 * HZ; i > 0; i--) {
2097 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2098 if (done == FIFO_EMPTY)
2099 schedule_timeout_uninterruptible(1);
2100 else
2101 return done;
2102 }
2103 /* Invalid address to tell caller we ran out of time */
2104 return 1;
2105 }
2106
2107 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2108 {
2109 /* We get in here if sendcmd() is polling for completions
2110 and gets some command back that it wasn't expecting --
2111 something other than that which it just sent down.
2112 Ordinarily, that shouldn't happen, but it can happen when
2113 the scsi tape stuff gets into error handling mode, and
2114 starts using sendcmd() to try to abort commands and
2115 reset tape drives. In that case, sendcmd may pick up
2116 completions of commands that were sent to logical drives
2117 through the block i/o system, or cciss ioctls completing, etc.
2118 In that case, we need to save those completions for later
2119 processing by the interrupt handler.
2120 */
2121
2122 #ifdef CONFIG_CISS_SCSI_TAPE
2123 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2124
2125 /* If it's not the scsi tape stuff doing error handling, (abort */
2126 /* or reset) then we don't expect anything weird. */
2127 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2128 #endif
2129 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2130 "Invalid command list address returned! (%lx)\n",
2131 ctlr, complete);
2132 /* not much we can do. */
2133 #ifdef CONFIG_CISS_SCSI_TAPE
2134 return 1;
2135 }
2136
2137 /* We've sent down an abort or reset, but something else
2138 has completed */
2139 if (srl->ncompletions >= (NR_CMDS + 2)) {
2140 /* Uh oh. No room to save it for later... */
2141 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2142 "reject list overflow, command lost!\n", ctlr);
2143 return 1;
2144 }
2145 /* Save it for later */
2146 srl->complete[srl->ncompletions] = complete;
2147 srl->ncompletions++;
2148 #endif
2149 return 0;
2150 }
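/*
 * Editor's note: add_sendcmd_reject() is a bounded "save it for later"
 * list: unexpected completions are appended to an array until the
 * interrupt handler drains them, and anything beyond the array capacity
 * is dropped with a warning. A minimal sketch of that pattern, with
 * hypothetical names and a fixed capacity, looks like this:
 */
#if 0
struct saved_completions {
	unsigned long tags[16];
	int count;
};

static int save_completion(struct saved_completions *s, unsigned long tag)
{
	if (s->count >= 16)
		return 1;		/* no room left: this completion is lost */
	s->tags[s->count++] = tag;	/* consumer pops from the end later */
	return 0;
}
#endif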
2151
2152 /*
2153 * Send a command to the controller, and wait for it to complete.
2154 * Only used at init time.
2155 */
2156 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2157 1: address logical volume log_unit,
2158 2: periph device address is scsi3addr */
2159 unsigned int log_unit,
2160 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2161 {
2162 CommandList_struct *c;
2163 int i;
2164 unsigned long complete;
2165 ctlr_info_t *info_p = hba[ctlr];
2166 u64bit buff_dma_handle;
2167 int status, done = 0;
2168
2169 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2170 printk(KERN_WARNING "cciss: unable to get memory");
2171 return IO_ERROR;
2172 }
2173 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2174 log_unit, page_code, scsi3addr, cmd_type);
2175 if (status != IO_OK) {
2176 cmd_free(info_p, c, 1);
2177 return status;
2178 }
2179 resend_cmd1:
2180 /*
2181 * Disable interrupt
2182 */
2183 #ifdef CCISS_DEBUG
2184 printk(KERN_DEBUG "cciss: turning intr off\n");
2185 #endif /* CCISS_DEBUG */
2186 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2187
2188 /* Make sure there is room in the command FIFO */
2189 /* Actually it should be completely empty at this time */
2190 /* unless we are in here doing error handling for the scsi */
2191 /* tape side of the driver. */
2192 for (i = 200000; i > 0; i--) {
2193 /* if fifo isn't full go */
2194 if (!(info_p->access.fifo_full(info_p))) {
2195
2196 break;
2197 }
2198 udelay(10);
2199 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2200 " waiting!\n", ctlr);
2201 }
2202 /*
2203 * Send the cmd
2204 */
2205 info_p->access.submit_command(info_p, c);
2206 done = 0;
2207 do {
2208 complete = pollcomplete(ctlr);
2209
2210 #ifdef CCISS_DEBUG
2211 printk(KERN_DEBUG "cciss: command completed\n");
2212 #endif /* CCISS_DEBUG */
2213
2214 if (complete == 1) {
2215 printk(KERN_WARNING
2216 "cciss cciss%d: SendCmd Timeout out, "
2217 "No command list address returned!\n", ctlr);
2218 status = IO_ERROR;
2219 done = 1;
2220 break;
2221 }
2222
2223 /* This will need to change for direct lookup completions */
2224 if ((complete & CISS_ERROR_BIT)
2225 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2226 /* if data overrun or underrun on Report command
2227 ignore it
2228 */
2229 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2230 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2231 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2232 ((c->err_info->CommandStatus ==
2233 CMD_DATA_OVERRUN) ||
2234 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2235 )) {
2236 complete = c->busaddr;
2237 } else {
2238 if (c->err_info->CommandStatus ==
2239 CMD_UNSOLICITED_ABORT) {
2240 printk(KERN_WARNING "cciss%d: "
2241 "unsolicited abort %p\n",
2242 ctlr, c);
2243 if (c->retry_count < MAX_CMD_RETRIES) {
2244 printk(KERN_WARNING
2245 "cciss%d: retrying %p\n",
2246 ctlr, c);
2247 c->retry_count++;
2248 /* erase the old error */
2249 /* information */
2250 memset(c->err_info, 0,
2251 sizeof
2252 (ErrorInfo_struct));
2253 goto resend_cmd1;
2254 } else {
2255 printk(KERN_WARNING
2256 "cciss%d: retried %p too "
2257 "many times\n", ctlr, c);
2258 status = IO_ERROR;
2259 goto cleanup1;
2260 }
2261 } else if (c->err_info->CommandStatus ==
2262 CMD_UNABORTABLE) {
2263 printk(KERN_WARNING
2264 "cciss%d: command could not be aborted.\n",
2265 ctlr);
2266 status = IO_ERROR;
2267 goto cleanup1;
2268 }
2269 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2270 " Error %x\n", ctlr,
2271 c->err_info->CommandStatus);
2272 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2273 " offensive info\n"
2274 " size %x\n num %x value %x\n",
2275 ctlr,
2276 c->err_info->MoreErrInfo.Invalid_Cmd.
2277 offense_size,
2278 c->err_info->MoreErrInfo.Invalid_Cmd.
2279 offense_num,
2280 c->err_info->MoreErrInfo.Invalid_Cmd.
2281 offense_value);
2282 status = IO_ERROR;
2283 goto cleanup1;
2284 }
2285 }
2286 /* This will need changing for direct lookup completions */
2287 if (complete != c->busaddr) {
2288 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2289 BUG(); /* we are pretty much hosed if we get here. */
2290 }
2291 continue;
2292 } else
2293 done = 1;
2294 } while (!done);
2295
2296 cleanup1:
2297 /* unlock the data buffer from DMA */
2298 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2299 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2300 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2301 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2302 #ifdef CONFIG_CISS_SCSI_TAPE
2303 /* if we saved some commands for later, process them now. */
2304 if (info_p->scsi_rejects.ncompletions > 0)
2305 do_cciss_intr(0, info_p);
2306 #endif
2307 cmd_free(info_p, c, 1);
2308 return status;
2309 }
2310
2311 /*
2312 * Map (physical) PCI mem into (virtual) kernel space
2313 */
2314 static void __iomem *remap_pci_mem(ulong base, ulong size)
2315 {
2316 ulong page_base = ((ulong) base) & PAGE_MASK;
2317 ulong page_offs = ((ulong) base) - page_base;
2318 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2319
2320 return page_remapped ? (page_remapped + page_offs) : NULL;
2321 }
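/*
 * Editor's note (illustrative numbers): with 4 KB pages, a BAR address of
 * 0xfd5f0c00 gives page_base = 0xfd5f0000 and page_offs = 0xc00, so the
 * mapping starts on a page boundary and the returned pointer is the
 * remapped base plus 0xc00. This keeps ioremap() page-aligned even when
 * the PCI region itself is not.
 */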
2322
2323 /*
2324 * Takes jobs off the request queue and sends them to the hardware, then
2325 * puts them on the completion queue to wait for completion.
2326 */
2327 static void start_io(ctlr_info_t *h)
2328 {
2329 CommandList_struct *c;
2330
2331 while ((c = h->reqQ) != NULL) {
2332 /* can't do anything if fifo is full */
2333 if ((h->access.fifo_full(h))) {
2334 printk(KERN_WARNING "cciss: fifo full\n");
2335 break;
2336 }
2337
2338 /* Get the first entry from the Request Q */
2339 removeQ(&(h->reqQ), c);
2340 h->Qdepth--;
2341
2342 /* Tell the controller execute command */
2343 h->access.submit_command(h, c);
2344
2345 /* Put job onto the completed Q */
2346 addQ(&(h->cmpQ), c);
2347 }
2348 }
2349
2350 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2351 /* Zeros out the error record and then resends the command back */
2352 /* to the controller */
2353 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2354 {
2355 /* erase the old error information */
2356 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2357
2358 /* add it to software queue and then send it to the controller */
2359 addQ(&(h->reqQ), c);
2360 h->Qdepth++;
2361 if (h->Qdepth > h->maxQsinceinit)
2362 h->maxQsinceinit = h->Qdepth;
2363
2364 start_io(h);
2365 }
2366
2367 /* Checks the status of the job and completes all buffers for the
2368 * finished request. Note that this function does not need
2369 * to hold the hba/queue lock.
2370 */
2371 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2372 int timeout)
2373 {
2374 int status = 1;
2375 int retry_cmd = 0;
2376
2377 if (timeout)
2378 status = 0;
2379
2380 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2381 switch (cmd->err_info->CommandStatus) {
2382 unsigned char sense_key;
2383 case CMD_TARGET_STATUS:
2384 status = 0;
2385
2386 if (cmd->err_info->ScsiStatus == 0x02) {
2387 printk(KERN_WARNING "cciss: cmd %p "
2388 "has CHECK CONDITION "
2389 " byte 2 = 0x%x\n", cmd,
2390 cmd->err_info->SenseInfo[2]
2391 );
2392 /* check the sense key */
2393 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2394 /* no status or recovered error */
2395 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2396 status = 1;
2397 }
2398 } else {
2399 printk(KERN_WARNING "cciss: cmd %p "
2400 "has SCSI Status 0x%x\n",
2401 cmd, cmd->err_info->ScsiStatus);
2402 }
2403 break;
2404 case CMD_DATA_UNDERRUN:
2405 printk(KERN_WARNING "cciss: cmd %p has"
2406 " completed with data underrun "
2407 "reported\n", cmd);
2408 break;
2409 case CMD_DATA_OVERRUN:
2410 printk(KERN_WARNING "cciss: cmd %p has"
2411 " completed with data overrun "
2412 "reported\n", cmd);
2413 break;
2414 case CMD_INVALID:
2415 printk(KERN_WARNING "cciss: cmd %p is "
2416 "reported invalid\n", cmd);
2417 status = 0;
2418 break;
2419 case CMD_PROTOCOL_ERR:
2420 printk(KERN_WARNING "cciss: cmd %p has "
2421 "protocol error \n", cmd);
2422 status = 0;
2423 break;
2424 case CMD_HARDWARE_ERR:
2425 printk(KERN_WARNING "cciss: cmd %p had "
2426 " hardware error\n", cmd);
2427 status = 0;
2428 break;
2429 case CMD_CONNECTION_LOST:
2430 printk(KERN_WARNING "cciss: cmd %p had "
2431 "connection lost\n", cmd);
2432 status = 0;
2433 break;
2434 case CMD_ABORTED:
2435 printk(KERN_WARNING "cciss: cmd %p was "
2436 "aborted\n", cmd);
2437 status = 0;
2438 break;
2439 case CMD_ABORT_FAILED:
2440 printk(KERN_WARNING "cciss: cmd %p reports "
2441 "abort failed\n", cmd);
2442 status = 0;
2443 break;
2444 case CMD_UNSOLICITED_ABORT:
2445 printk(KERN_WARNING "cciss%d: unsolicited "
2446 "abort %p\n", h->ctlr, cmd);
2447 if (cmd->retry_count < MAX_CMD_RETRIES) {
2448 retry_cmd = 1;
2449 printk(KERN_WARNING
2450 "cciss%d: retrying %p\n", h->ctlr, cmd);
2451 cmd->retry_count++;
2452 } else
2453 printk(KERN_WARNING
2454 "cciss%d: %p retried too "
2455 "many times\n", h->ctlr, cmd);
2456 status = 0;
2457 break;
2458 case CMD_TIMEOUT:
2459 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2460 status = 0;
2461 break;
2462 default:
2463 printk(KERN_WARNING "cciss: cmd %p returned "
2464 "unknown status %x\n", cmd,
2465 cmd->err_info->CommandStatus);
2466 status = 0;
2467 }
2468 }
2469 /* We need to return this command */
2470 if (retry_cmd) {
2471 resend_cciss_cmd(h, cmd);
2472 return;
2473 }
2474
2475 cmd->rq->completion_data = cmd;
2476 cmd->rq->errors = status;
2477 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2478 blk_complete_request(cmd->rq);
2479 }
2480
2481 /*
2482 * Get a request and submit it to the controller.
2483 */
2484 static void do_cciss_request(request_queue_t *q)
2485 {
2486 ctlr_info_t *h = q->queuedata;
2487 CommandList_struct *c;
2488 sector_t start_blk;
2489 int seg;
2490 struct request *creq;
2491 u64bit temp64;
2492 struct scatterlist tmp_sg[MAXSGENTRIES];
2493 drive_info_struct *drv;
2494 int i, dir;
2495
2496 /* We call start_io here in case there is a command waiting on the
2497 * queue that has not been sent.
2498 */
2499 if (blk_queue_plugged(q))
2500 goto startio;
2501
2502 queue:
2503 creq = elv_next_request(q);
2504 if (!creq)
2505 goto startio;
2506
2507 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2508
2509 if ((c = cmd_alloc(h, 1)) == NULL)
2510 goto full;
2511
2512 blkdev_dequeue_request(creq);
2513
2514 spin_unlock_irq(q->queue_lock);
2515
2516 c->cmd_type = CMD_RWREQ;
2517 c->rq = creq;
2518
2519 /* fill in the request */
2520 drv = creq->rq_disk->private_data;
2521 c->Header.ReplyQueue = 0; // unused in simple mode
2522 /* got command from pool, so use the command block index instead */
2523 /* for direct lookups. */
2524 /* The first 2 bits are reserved for controller error reporting. */
2525 c->Header.Tag.lower = (c->cmdindex << 3);
2526 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
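/*
 * Editor's note (worked example): for command index 5 the tag becomes
 * (5 << 3) | 0x04 = 0x2c. The interrupt handler sees bit 2 set, recovers
 * the index as 0x2c >> 3 = 5, and bits 0-1 remain free for the
 * controller's error reporting.
 */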
2527 c->Header.LUN.LogDev.VolId = drv->LunID;
2528 c->Header.LUN.LogDev.Mode = 1;
2529 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2530 c->Request.Type.Type = TYPE_CMD; // It is a command.
2531 c->Request.Type.Attribute = ATTR_SIMPLE;
2532 c->Request.Type.Direction =
2533 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2534 c->Request.Timeout = 0; // Don't time out
2535 c->Request.CDB[0] =
2536 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2537 start_blk = creq->sector;
2538 #ifdef CCISS_DEBUG
2539 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2540 (int)creq->nr_sectors);
2541 #endif /* CCISS_DEBUG */
2542
2543 seg = blk_rq_map_sg(q, creq, tmp_sg);
2544
2545 /* get the DMA records for the setup */
2546 if (c->Request.Type.Direction == XFER_READ)
2547 dir = PCI_DMA_FROMDEVICE;
2548 else
2549 dir = PCI_DMA_TODEVICE;
2550
2551 for (i = 0; i < seg; i++) {
2552 c->SG[i].Len = tmp_sg[i].length;
2553 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2554 tmp_sg[i].offset,
2555 tmp_sg[i].length, dir);
2556 c->SG[i].Addr.lower = temp64.val32.lower;
2557 c->SG[i].Addr.upper = temp64.val32.upper;
2558 c->SG[i].Ext = 0; // we are not chaining
2559 }
2560 /* track how many SG entries we are using */
2561 if (seg > h->maxSG)
2562 h->maxSG = seg;
2563
2564 #ifdef CCISS_DEBUG
2565 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2566 creq->nr_sectors, seg);
2567 #endif /* CCISS_DEBUG */
2568
2569 c->Header.SGList = c->Header.SGTotal = seg;
2570 if (h->cciss_read == CCISS_READ_10) {
2571 c->Request.CDB[1] = 0;
2572 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2573 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2574 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2575 c->Request.CDB[5] = start_blk & 0xff;
2576 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2577 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2578 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2579 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2580 } else {
2581 c->Request.CDBLen = 16;
2582 c->Request.CDB[1] = 0;
2583 c->Request.CDB[2] = (start_blk >> 56) & 0xff; // MSB
2584 c->Request.CDB[3] = (start_blk >> 48) & 0xff;
2585 c->Request.CDB[4] = (start_blk >> 40) & 0xff;
2586 c->Request.CDB[5] = (start_blk >> 32) & 0xff;
2587 c->Request.CDB[6] = (start_blk >> 24) & 0xff;
2588 c->Request.CDB[7] = (start_blk >> 16) & 0xff;
2589 c->Request.CDB[8] = (start_blk >> 8) & 0xff;
2590 c->Request.CDB[9] = start_blk & 0xff;
2591 c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
2592 c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
2593 c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
2594 c->Request.CDB[13] = creq->nr_sectors & 0xff;
2595 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2596 }
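/*
 * Editor's note (worked example): for a READ(10) of 512 sectors starting
 * at LBA 0x12345678, the first branch above yields CDB[2..5] =
 * 12 34 56 78 (big-endian LBA) and CDB[7..8] = 02 00 (big-endian sector
 * count). The 16-byte form is the same layout with an 8-byte LBA and a
 * 4-byte count.
 */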
2597
2598 spin_lock_irq(q->queue_lock);
2599
2600 addQ(&(h->reqQ), c);
2601 h->Qdepth++;
2602 if (h->Qdepth > h->maxQsinceinit)
2603 h->maxQsinceinit = h->Qdepth;
2604
2605 goto queue;
2606 full:
2607 blk_stop_queue(q);
2608 startio:
2609 /* We will already have the driver lock here, so there is no need
2610 * to take it again.
2611 */
2612 start_io(h);
2613 }
2614
2615 static inline unsigned long get_next_completion(ctlr_info_t *h)
2616 {
2617 #ifdef CONFIG_CISS_SCSI_TAPE
2618 /* Any rejects from sendcmd() lying around? Process them first */
2619 if (h->scsi_rejects.ncompletions == 0)
2620 return h->access.command_completed(h);
2621 else {
2622 struct sendcmd_reject_list *srl;
2623 int n;
2624 srl = &h->scsi_rejects;
2625 n = --srl->ncompletions;
2626 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2627 printk("p");
2628 return srl->complete[n];
2629 }
2630 #else
2631 return h->access.command_completed(h);
2632 #endif
2633 }
2634
2635 static inline int interrupt_pending(ctlr_info_t *h)
2636 {
2637 #ifdef CONFIG_CISS_SCSI_TAPE
2638 return (h->access.intr_pending(h)
2639 || (h->scsi_rejects.ncompletions > 0));
2640 #else
2641 return h->access.intr_pending(h);
2642 #endif
2643 }
2644
2645 static inline long interrupt_not_for_us(ctlr_info_t *h)
2646 {
2647 #ifdef CONFIG_CISS_SCSI_TAPE
2648 return (((h->access.intr_pending(h) == 0) ||
2649 (h->interrupts_enabled == 0))
2650 && (h->scsi_rejects.ncompletions == 0));
2651 #else
2652 return (((h->access.intr_pending(h) == 0) ||
2653 (h->interrupts_enabled == 0)));
2654 #endif
2655 }
2656
2657 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2658 {
2659 ctlr_info_t *h = dev_id;
2660 CommandList_struct *c;
2661 unsigned long flags;
2662 __u32 a, a1, a2;
2663
2664 if (interrupt_not_for_us(h))
2665 return IRQ_NONE;
2666 /*
2667 * If there are completed commands in the completion queue,
2668 * we had better do something about it.
2669 */
2670 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2671 while (interrupt_pending(h)) {
2672 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2673 a1 = a;
2674 if ((a & 0x04)) {
2675 a2 = (a >> 3);
2676 if (a2 >= NR_CMDS) {
2677 printk(KERN_WARNING
2678 "cciss: controller cciss%d failed, stopping.\n",
2679 h->ctlr);
2680 fail_all_cmds(h->ctlr);
2681 return IRQ_HANDLED;
2682 }
2683
2684 c = h->cmd_pool + a2;
2685 a = c->busaddr;
2686
2687 } else {
2688 a &= ~3;
2689 if ((c = h->cmpQ) == NULL) {
2690 printk(KERN_WARNING
2691 "cciss: Completion of %08x ignored\n",
2692 a1);
2693 continue;
2694 }
2695 while (c->busaddr != a) {
2696 c = c->next;
2697 if (c == h->cmpQ)
2698 break;
2699 }
2700 }
2701 /*
2702 * If we've found the command, take it off the
2703 * completion Q and free it
2704 */
2705 if (c->busaddr == a) {
2706 removeQ(&h->cmpQ, c);
2707 if (c->cmd_type == CMD_RWREQ) {
2708 complete_command(h, c, 0);
2709 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2710 complete(c->waiting);
2711 }
2712 # ifdef CONFIG_CISS_SCSI_TAPE
2713 else if (c->cmd_type == CMD_SCSI)
2714 complete_scsi_command(c, 0, a1);
2715 # endif
2716 continue;
2717 }
2718 }
2719 }
2720
2721 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2722 return IRQ_HANDLED;
2723 }
2724
2725 /*
2726 * We cannot read the structure directly, for portability we must use
2727 * the io functions.
2728 * This is for debug only.
2729 */
2730 #ifdef CCISS_DEBUG
2731 static void print_cfg_table(CfgTable_struct *tb)
2732 {
2733 int i;
2734 char temp_name[17];
2735
2736 printk("Controller Configuration information\n");
2737 printk("------------------------------------\n");
2738 for (i = 0; i < 4; i++)
2739 temp_name[i] = readb(&(tb->Signature[i]));
2740 temp_name[4] = '\0';
2741 printk(" Signature = %s\n", temp_name);
2742 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2743 printk(" Transport methods supported = 0x%x\n",
2744 readl(&(tb->TransportSupport)));
2745 printk(" Transport methods active = 0x%x\n",
2746 readl(&(tb->TransportActive)));
2747 printk(" Requested transport Method = 0x%x\n",
2748 readl(&(tb->HostWrite.TransportRequest)));
2749 printk(" Coalesce Interrupt Delay = 0x%x\n",
2750 readl(&(tb->HostWrite.CoalIntDelay)));
2751 printk(" Coalesce Interrupt Count = 0x%x\n",
2752 readl(&(tb->HostWrite.CoalIntCount)));
2753 printk(" Max outstanding commands = 0x%d\n",
2754 readl(&(tb->CmdsOutMax)));
2755 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2756 for (i = 0; i < 16; i++)
2757 temp_name[i] = readb(&(tb->ServerName[i]));
2758 temp_name[16] = '\0';
2759 printk(" Server Name = %s\n", temp_name);
2760 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2761 }
2762 #endif /* CCISS_DEBUG */
2763
2764 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2765 {
2766 int i, offset, mem_type, bar_type;
2767 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2768 return 0;
2769 offset = 0;
2770 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2771 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2772 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2773 offset += 4;
2774 else {
2775 mem_type = pci_resource_flags(pdev, i) &
2776 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2777 switch (mem_type) {
2778 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2779 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2780 offset += 4; /* 32 bit */
2781 break;
2782 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2783 offset += 8;
2784 break;
2785 default: /* reserved in PCI 2.2 */
2786 printk(KERN_WARNING
2787 "Base address is invalid\n");
2788 return -1;
2789 break;
2790 }
2791 }
2792 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2793 return i + 1;
2794 }
2795 return -1;
2796 }
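/*
 * Editor's note (worked example): the controller reports the config-table
 * BAR as a byte offset within PCI config space. With pci_bar_addr = 0x14
 * (i.e. PCI_BASE_ADDRESS_0 + 4) and a 32-bit memory BAR 0, the loop above
 * accumulates offset = 4 on the first pass and returns i + 1 = 1, the
 * resource index holding the config table.
 */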
2797
2798 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2799 * controllers that are capable. If not, we use IO-APIC mode.
2800 */
2801
2802 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2803 struct pci_dev *pdev, __u32 board_id)
2804 {
2805 #ifdef CONFIG_PCI_MSI
2806 int err;
2807 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2808 {0, 2}, {0, 3}
2809 };
2810
2811 /* Some boards advertise MSI but don't really support it */
2812 if ((board_id == 0x40700E11) ||
2813 (board_id == 0x40800E11) ||
2814 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2815 goto default_int_mode;
2816
2817 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2818 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2819 if (!err) {
2820 c->intr[0] = cciss_msix_entries[0].vector;
2821 c->intr[1] = cciss_msix_entries[1].vector;
2822 c->intr[2] = cciss_msix_entries[2].vector;
2823 c->intr[3] = cciss_msix_entries[3].vector;
2824 c->msix_vector = 1;
2825 return;
2826 }
2827 if (err > 0) {
2828 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2829 "available\n", err);
2830 } else {
2831 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2832 err);
2833 }
2834 }
2835 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2836 if (!pci_enable_msi(pdev)) {
2837 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2838 c->msi_vector = 1;
2839 return;
2840 } else {
2841 printk(KERN_WARNING "cciss: MSI init failed\n");
2842 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2843 return;
2844 }
2845 }
2846 default_int_mode:
2847 #endif /* CONFIG_PCI_MSI */
2848 /* if we get here we're going to use the default interrupt mode */
2849 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2850 return;
2851 }
2852
2853 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2854 {
2855 ushort subsystem_vendor_id, subsystem_device_id, command;
2856 __u32 board_id, scratchpad = 0;
2857 __u64 cfg_offset;
2858 __u32 cfg_base_addr;
2859 __u64 cfg_base_addr_index;
2860 int i, err;
2861
2862 /* check to see if controller has been disabled */
2863 /* BEFORE trying to enable it */
2864 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2865 if (!(command & 0x02)) {
2866 printk(KERN_WARNING
2867 "cciss: controller appears to be disabled\n");
2868 return -ENODEV;
2869 }
2870
2871 err = pci_enable_device(pdev);
2872 if (err) {
2873 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2874 return err;
2875 }
2876
2877 err = pci_request_regions(pdev, "cciss");
2878 if (err) {
2879 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2880 "aborting\n");
2881 goto err_out_disable_pdev;
2882 }
2883
2884 subsystem_vendor_id = pdev->subsystem_vendor;
2885 subsystem_device_id = pdev->subsystem_device;
2886 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2887 subsystem_vendor_id);
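/*
 * Editor's note (worked example): a board with subsystem device ID 0xabcd
 * (hypothetical) and subsystem vendor ID 0x103c packs into board_id
 * 0xabcd103c (device ID in the upper 16 bits, vendor ID in the lower 16).
 * The quirk values tested in cciss_interrupt_mode() (e.g. 0x40700E11) use
 * the same layout.
 */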
2888
2889 #ifdef CCISS_DEBUG
2890 printk("command = %x\n", command);
2891 printk("irq = %x\n", pdev->irq);
2892 printk("board_id = %x\n", board_id);
2893 #endif /* CCISS_DEBUG */
2894
2895 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2896 * else we use the IO-APIC interrupt assigned to us by system ROM.
2897 */
2898 cciss_interrupt_mode(c, pdev, board_id);
2899
2900 /*
2901 * The memory base address is the first address; the second points to
2902 * the config table.
2903 */
2904
2905 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2906 #ifdef CCISS_DEBUG
2907 printk("address 0 = %x\n", c->paddr);
2908 #endif /* CCISS_DEBUG */
2909 c->vaddr = remap_pci_mem(c->paddr, 200);
2910
2911 /* Wait for the board to become ready. (PCI hotplug needs this.)
2912 * We poll for up to 120 secs, once per 100ms. */
2913 for (i = 0; i < 1200; i++) {
2914 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2915 if (scratchpad == CCISS_FIRMWARE_READY)
2916 break;
2917 set_current_state(TASK_INTERRUPTIBLE);
2918 schedule_timeout(HZ / 10); /* wait 100ms */
2919 }
2920 if (scratchpad != CCISS_FIRMWARE_READY) {
2921 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2922 err = -ENODEV;
2923 goto err_out_free_res;
2924 }
2925
2926 /* get the address index number */
2927 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2928 cfg_base_addr &= (__u32) 0x0000ffff;
2929 #ifdef CCISS_DEBUG
2930 printk("cfg base address = %x\n", cfg_base_addr);
2931 #endif /* CCISS_DEBUG */
2932 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2933 #ifdef CCISS_DEBUG
2934 printk("cfg base address index = %x\n", cfg_base_addr_index);
2935 #endif /* CCISS_DEBUG */
2936 if (cfg_base_addr_index == -1) {
2937 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2938 err = -ENODEV;
2939 goto err_out_free_res;
2940 }
2941
2942 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2943 #ifdef CCISS_DEBUG
2944 printk("cfg offset = %x\n", cfg_offset);
2945 #endif /* CCISS_DEBUG */
2946 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2947 cfg_base_addr_index) +
2948 cfg_offset, sizeof(CfgTable_struct));
2949 c->board_id = board_id;
2950
2951 #ifdef CCISS_DEBUG
2952 print_cfg_table(c->cfgtable);
2953 #endif /* CCISS_DEBUG */
2954
2955 for (i = 0; i < ARRAY_SIZE(products); i++) {
2956 if (board_id == products[i].board_id) {
2957 c->product_name = products[i].product_name;
2958 c->access = *(products[i].access);
2959 break;
2960 }
2961 }
2962 if (i == ARRAY_SIZE(products)) {
2963 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2964 " to access the Smart Array controller %08lx\n",
2965 (unsigned long)board_id);
2966 err = -ENODEV;
2967 goto err_out_free_res;
2968 }
2969 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2970 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2971 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2972 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2973 printk("Does not appear to be a valid CISS config table\n");
2974 err = -ENODEV;
2975 goto err_out_free_res;
2976 }
2977 #ifdef CONFIG_X86
2978 {
2979 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2980 __u32 prefetch;
2981 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2982 prefetch |= 0x100;
2983 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2984 }
2985 #endif
2986
2987 #ifdef CCISS_DEBUG
2988 printk("Trying to put board into Simple mode\n");
2989 #endif /* CCISS_DEBUG */
2990 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2991 /* Update the field, and then ring the doorbell */
2992 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2993 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2994
2995 /* under certain very rare conditions, this can take a while.
2996 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2997 * as we enter this code.) */
2998 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
2999 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3000 break;
3001 /* delay and try again */
3002 set_current_state(TASK_INTERRUPTIBLE);
3003 schedule_timeout(10);
3004 }
3005
3006 #ifdef CCISS_DEBUG
3007 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3008 readl(c->vaddr + SA5_DOORBELL));
3009 #endif /* CCISS_DEBUG */
3010 #ifdef CCISS_DEBUG
3011 print_cfg_table(c->cfgtable);
3012 #endif /* CCISS_DEBUG */
3013
3014 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3015 printk(KERN_WARNING "cciss: unable to get board into"
3016 " simple mode\n");
3017 err = -ENODEV;
3018 goto err_out_free_res;
3019 }
3020 return 0;
3021
3022 err_out_free_res:
3023 pci_release_regions(pdev);
3024
3025 err_out_disable_pdev:
3026 pci_disable_device(pdev);
3027 return err;
3028 }
3029
3030 /*
3031 * Gets information about the local volumes attached to the controller.
3032 */
3033 static void cciss_getgeometry(int cntl_num)
3034 {
3035 ReportLunData_struct *ld_buff;
3036 InquiryData_struct *inq_buff;
3037 int return_code;
3038 int i;
3039 int listlength = 0;
3040 __u32 lunid = 0;
3041 int block_size;
3042 sector_t total_size;
3043
3044 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3045 if (ld_buff == NULL) {
3046 printk(KERN_ERR "cciss: out of memory\n");
3047 return;
3048 }
3049 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3050 if (inq_buff == NULL) {
3051 printk(KERN_ERR "cciss: out of memory\n");
3052 kfree(ld_buff);
3053 return;
3054 }
3055 /* Get the firmware version */
3056 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3057 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3058 TYPE_CMD);
3059 if (return_code == IO_OK) {
3060 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3061 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3062 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3063 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3064 } else { /* send command failed */
3065
3066 printk(KERN_WARNING "cciss: unable to determine firmware"
3067 " version of controller\n");
3068 }
3069 /* Get the number of logical volumes */
3070 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3071 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3072 TYPE_CMD);
3073
3074 if (return_code == IO_OK) {
3075 #ifdef CCISS_DEBUG
3076 printk("LUN Data\n--------------------------\n");
3077 #endif /* CCISS_DEBUG */
3078
3079 listlength |=
3080 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3081 listlength |=
3082 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3083 listlength |=
3084 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3085 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3086 } else { /* reading number of logical volumes failed */
3087
3088 printk(KERN_WARNING "cciss: report logical volume"
3089 " command failed\n");
3090 listlength = 0;
3091 }
3092 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
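/*
 * Editor's note (worked example): LUNListLength is a big-endian byte
 * count, so bytes 00 00 00 20 decode to 32 and 32 / 8 = 4 logical
 * volumes, each described by an 8-byte LUN entry.
 */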
3093 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3094 printk(KERN_ERR
3095 "ciss: only %d number of logical volumes supported\n",
3096 CISS_MAX_LUN);
3097 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3098 }
3099 #ifdef CCISS_DEBUG
3100 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3101 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3102 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3103 hba[cntl_num]->num_luns);
3104 #endif /* CCISS_DEBUG */
3105
3106 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3107 for (i = 0; i < CISS_MAX_LUN; i++) {
3108 if (i < hba[cntl_num]->num_luns) {
3109 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3110 << 24;
3111 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3112 << 16;
3113 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3114 << 8;
3115 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3116
3117 hba[cntl_num]->drv[i].LunID = lunid;
3118
3119 #ifdef CCISS_DEBUG
3120 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3121 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3122 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3123 hba[cntl_num]->drv[i].LunID);
3124 #endif /* CCISS_DEBUG */
3125
3126 /* testing to see if 16-byte CDBs are already being used */
3127 if (hba[cntl_num]->cciss_read == CCISS_READ_16) {
3128 cciss_read_capacity_16(cntl_num, i, 0,
3129 &total_size, &block_size);
3130 goto geo_inq;
3131 }
3132 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3133
3134 /* total_size = last LBA + 1 */
3135 if (total_size == (__u32) 0) {
3136 cciss_read_capacity_16(cntl_num, i, 0,
3137 &total_size, &block_size);
3138 hba[cntl_num]->cciss_read = CCISS_READ_16;
3139 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3140 } else {
3141 hba[cntl_num]->cciss_read = CCISS_READ_10;
3142 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3143 }
3144 geo_inq:
3145 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3146 block_size, inq_buff,
3147 &hba[cntl_num]->drv[i]);
3148 } else {
3149 /* initialize raid_level to indicate a free space */
3150 hba[cntl_num]->drv[i].raid_level = -1;
3151 }
3152 }
3153 kfree(ld_buff);
3154 kfree(inq_buff);
3155 }
3156
3157 /* Function to find the first free pointer into our hba[] array */
3158 /* Returns -1 if no free entries are left. */
3159 static int alloc_cciss_hba(void)
3160 {
3161 struct gendisk *disk[NWD];
3162 int i, n;
3163 for (n = 0; n < NWD; n++) {
3164 disk[n] = alloc_disk(1 << NWD_SHIFT);
3165 if (!disk[n])
3166 goto out;
3167 }
3168
3169 for (i = 0; i < MAX_CTLR; i++) {
3170 if (!hba[i]) {
3171 ctlr_info_t *p;
3172 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3173 if (!p)
3174 goto Enomem;
3175 for (n = 0; n < NWD; n++)
3176 p->gendisk[n] = disk[n];
3177 hba[i] = p;
3178 return i;
3179 }
3180 }
3181 printk(KERN_WARNING "cciss: This driver supports a maximum"
3182 " of %d controllers.\n", MAX_CTLR);
3183 goto out;
3184 Enomem:
3185 printk(KERN_ERR "cciss: out of memory.\n");
3186 out:
3187 while (n--)
3188 put_disk(disk[n]);
3189 return -1;
3190 }
3191
3192 static void free_hba(int i)
3193 {
3194 ctlr_info_t *p = hba[i];
3195 int n;
3196
3197 hba[i] = NULL;
3198 for (n = 0; n < NWD; n++)
3199 put_disk(p->gendisk[n]);
3200 kfree(p);
3201 }
3202
3203 /*
3204 * This is it. Find all the controllers and register them. I really hate
3205 * stealing all these major device numbers.
3206 * Returns the number of block devices registered.
3207 */
3208 static int __devinit cciss_init_one(struct pci_dev *pdev,
3209 const struct pci_device_id *ent)
3210 {
3211 request_queue_t *q;
3212 int i;
3213 int j;
3214 int rc;
3215 int dac;
3216
3217 i = alloc_cciss_hba();
3218 if (i < 0)
3219 return -1;
3220
3221 hba[i]->busy_initializing = 1;
3222
3223 if (cciss_pci_init(hba[i], pdev) != 0)
3224 goto clean1;
3225
3226 sprintf(hba[i]->devname, "cciss%d", i);
3227 hba[i]->ctlr = i;
3228 hba[i]->pdev = pdev;
3229
3230 /* configure PCI DMA stuff */
3231 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3232 dac = 1;
3233 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3234 dac = 0;
3235 else {
3236 printk(KERN_ERR "cciss: no suitable DMA available\n");
3237 goto clean1;
3238 }
3239
3240 /*
3241 * register with the major number, or get a dynamic major number
3242 * by passing 0 as argument. This is done for greater than
3243 * 8 controller support.
3244 */
3245 if (i < MAX_CTLR_ORIG)
3246 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3247 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3248 if (rc == -EBUSY || rc == -EINVAL) {
3249 printk(KERN_ERR
3250 "cciss: Unable to get major number %d for %s "
3251 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3252 goto clean1;
3253 } else {
3254 if (i >= MAX_CTLR_ORIG)
3255 hba[i]->major = rc;
3256 }
3257
3258 /* make sure the board interrupts are off */
3259 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3260 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3261 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3262 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3263 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3264 goto clean2;
3265 }
3266
3267 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3268 hba[i]->devname, pdev->device, pci_name(pdev),
3269 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3270
3271 hba[i]->cmd_pool_bits =
3272 kmalloc(((NR_CMDS + BITS_PER_LONG -
3273 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3274 hba[i]->cmd_pool = (CommandList_struct *)
3275 pci_alloc_consistent(hba[i]->pdev,
3276 NR_CMDS * sizeof(CommandList_struct),
3277 &(hba[i]->cmd_pool_dhandle));
3278 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3279 pci_alloc_consistent(hba[i]->pdev,
3280 NR_CMDS * sizeof(ErrorInfo_struct),
3281 &(hba[i]->errinfo_pool_dhandle));
3282 if ((hba[i]->cmd_pool_bits == NULL)
3283 || (hba[i]->cmd_pool == NULL)
3284 || (hba[i]->errinfo_pool == NULL)) {
3285 printk(KERN_ERR "cciss: out of memory");
3286 goto clean4;
3287 }
3288 #ifdef CONFIG_CISS_SCSI_TAPE
3289 hba[i]->scsi_rejects.complete =
3290 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3291 (NR_CMDS + 5), GFP_KERNEL);
3292 if (hba[i]->scsi_rejects.complete == NULL) {
3293 printk(KERN_ERR "cciss: out of memory");
3294 goto clean4;
3295 }
3296 #endif
3297 spin_lock_init(&hba[i]->lock);
3298
3299 /* Initialize the pdev driver private data.
3300 have it point to hba[i]. */
3301 pci_set_drvdata(pdev, hba[i]);
3302 /* command and error info recs zeroed out before
3303 they are used */
3304 memset(hba[i]->cmd_pool_bits, 0,
3305 ((NR_CMDS + BITS_PER_LONG -
3306 1) / BITS_PER_LONG) * sizeof(unsigned long));
3307
3308 #ifdef CCISS_DEBUG
3309 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3310 #endif /* CCISS_DEBUG */
3311
3312 cciss_getgeometry(i);
3313
3314 cciss_scsi_setup(i);
3315
3316 /* Turn the interrupts on so we can service requests */
3317 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3318
3319 cciss_procinit(i);
3320 hba[i]->busy_initializing = 0;
3321
3322 for (j = 0; j < NWD; j++) { /* mfm */
3323 drive_info_struct *drv = &(hba[i]->drv[j]);
3324 struct gendisk *disk = hba[i]->gendisk[j];
3325
3326 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3327 if (!q) {
3328 printk(KERN_ERR
3329 "cciss: unable to allocate queue for disk %d\n",
3330 j);
3331 break;
3332 }
3333 drv->queue = q;
3334
3335 q->backing_dev_info.ra_pages = READ_AHEAD;
3336 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3337
3338 /* This is a hardware imposed limit. */
3339 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3340
3341 /* This is a limit in the driver and could be eliminated. */
3342 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3343
3344 blk_queue_max_sectors(q, 512);
3345
3346 blk_queue_softirq_done(q, cciss_softirq_done);
3347
3348 q->queuedata = hba[i];
3349 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3350 disk->major = hba[i]->major;
3351 disk->first_minor = j << NWD_SHIFT;
3352 disk->fops = &cciss_fops;
3353 disk->queue = q;
3354 disk->private_data = drv;
3355 disk->driverfs_dev = &pdev->dev;
3356 /* we must register the controller even if no disks exist */
3357 /* this is for the online array utilities */
3358 if (!drv->heads && j)
3359 continue;
3360 blk_queue_hardsect_size(q, drv->block_size);
3361 set_capacity(disk, drv->nr_blocks);
3362 add_disk(disk);
3363 }
3364
3365 return 1;
3366
3367 clean4:
3368 #ifdef CONFIG_CISS_SCSI_TAPE
3369 kfree(hba[i]->scsi_rejects.complete);
3370 #endif
3371 kfree(hba[i]->cmd_pool_bits);
3372 if (hba[i]->cmd_pool)
3373 pci_free_consistent(hba[i]->pdev,
3374 NR_CMDS * sizeof(CommandList_struct),
3375 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3376 if (hba[i]->errinfo_pool)
3377 pci_free_consistent(hba[i]->pdev,
3378 NR_CMDS * sizeof(ErrorInfo_struct),
3379 hba[i]->errinfo_pool,
3380 hba[i]->errinfo_pool_dhandle);
3381 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3382 clean2:
3383 unregister_blkdev(hba[i]->major, hba[i]->devname);
3384 clean1:
3385 hba[i]->busy_initializing = 0;
3386 free_hba(i);
3387 return -1;
3388 }
3389
3390 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3391 {
3392 ctlr_info_t *tmp_ptr;
3393 int i, j;
3394 char flush_buf[4];
3395 int return_code;
3396
3397 if (pci_get_drvdata(pdev) == NULL) {
3398 printk(KERN_ERR "cciss: Unable to remove device\n");
3399 return;
3400 }
3401 tmp_ptr = pci_get_drvdata(pdev);
3402 i = tmp_ptr->ctlr;
3403 if (hba[i] == NULL) {
3404 printk(KERN_ERR "cciss: device appears to "
3405 "already be removed \n");
3406 return;
3407 }
3408 /* Turn board interrupts off and send the flush cache command */
3409 /* sendcmd will turn off interrupts and send the flush so that
3410 * all data in the battery-backed cache is written to disk */
3411 memset(flush_buf, 0, 4);
3412 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3413 TYPE_CMD);
3414 if (return_code != IO_OK) {
3415 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3416 i);
3417 }
3418 free_irq(hba[i]->intr[2], hba[i]);
3419
3420 #ifdef CONFIG_PCI_MSI
3421 if (hba[i]->msix_vector)
3422 pci_disable_msix(hba[i]->pdev);
3423 else if (hba[i]->msi_vector)
3424 pci_disable_msi(hba[i]->pdev);
3425 #endif /* CONFIG_PCI_MSI */
3426
3427 iounmap(hba[i]->vaddr);
3428 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3429 unregister_blkdev(hba[i]->major, hba[i]->devname);
3430 remove_proc_entry(hba[i]->devname, proc_cciss);
3431
3432 /* remove it from the disk list */
3433 for (j = 0; j < NWD; j++) {
3434 struct gendisk *disk = hba[i]->gendisk[j];
3435 if (disk) {
3436 request_queue_t *q = disk->queue;
3437
3438 if (disk->flags & GENHD_FL_UP)
3439 del_gendisk(disk);
3440 if (q)
3441 blk_cleanup_queue(q);
3442 }
3443 }
3444
3445 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3446 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3447 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3448 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3449 kfree(hba[i]->cmd_pool_bits);
3450 #ifdef CONFIG_CISS_SCSI_TAPE
3451 kfree(hba[i]->scsi_rejects.complete);
3452 #endif
3453 pci_release_regions(pdev);
3454 pci_disable_device(pdev);
3455 pci_set_drvdata(pdev, NULL);
3456 free_hba(i);
3457 }
3458
3459 static struct pci_driver cciss_pci_driver = {
3460 .name = "cciss",
3461 .probe = cciss_init_one,
3462 .remove = __devexit_p(cciss_remove_one),
3463 .id_table = cciss_pci_device_id, /* id_table */
3464 };
3465
3466 /*
3467 * This is it. Register the PCI driver information for the cards we control;
3468 * the OS will call our registered routines when it finds one of our cards.
3469 */
3470 static int __init cciss_init(void)
3471 {
3472 printk(KERN_INFO DRIVER_NAME "\n");
3473
3474 /* Register for our PCI devices */
3475 return pci_register_driver(&cciss_pci_driver);
3476 }
3477
3478 static void __exit cciss_cleanup(void)
3479 {
3480 int i;
3481
3482 pci_unregister_driver(&cciss_pci_driver);
3483 /* double check that all controller entries have been removed */
3484 for (i = 0; i < MAX_CTLR; i++) {
3485 if (hba[i] != NULL) {
3486 printk(KERN_WARNING "cciss: had to remove"
3487 " controller %d\n", i);
3488 cciss_remove_one(hba[i]->pdev);
3489 }
3490 }
3491 remove_proc_entry("cciss", proc_root_driver);
3492 }
3493
3494 static void fail_all_cmds(unsigned long ctlr)
3495 {
3496 /* If we get here, the board is apparently dead. */
3497 ctlr_info_t *h = hba[ctlr];
3498 CommandList_struct *c;
3499 unsigned long flags;
3500
3501 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3502 h->alive = 0; /* the controller apparently died... */
3503
3504 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3505
3506 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3507
3508 /* move everything off the request queue onto the completed queue */
3509 while ((c = h->reqQ) != NULL) {
3510 removeQ(&(h->reqQ), c);
3511 h->Qdepth--;
3512 addQ(&(h->cmpQ), c);
3513 }
3514
3515 /* Now, fail everything on the completed queue with a HW error */
3516 while ((c = h->cmpQ) != NULL) {
3517 removeQ(&h->cmpQ, c);
3518 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3519 if (c->cmd_type == CMD_RWREQ) {
3520 complete_command(h, c, 0);
3521 } else if (c->cmd_type == CMD_IOCTL_PEND)
3522 complete(c->waiting);
3523 #ifdef CONFIG_CISS_SCSI_TAPE
3524 else if (c->cmd_type == CMD_SCSI)
3525 complete_scsi_command(c, 0, 0);
3526 #endif
3527 }
3528 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3529 return;
3530 }
3531
3532 module_init(cciss_init);
3533 module_exit(cciss_cleanup);