1 /*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/bio.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/blkpg.h>
33 #include <linux/timer.h>
34 #include <linux/proc_fs.h>
35 #include <linux/init.h>
36 #include <linux/hdreg.h>
37 #include <linux/spinlock.h>
38 #include <linux/blkdev.h>
39 #include <linux/genhd.h>
40 #include <linux/scatterlist.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44
45 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
46
47 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
48 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
49
50 /* Embedded module documentation macros - see modules.h */
51 /* Original author Chris Frantz - Compaq Computer Corporation */
52 MODULE_AUTHOR("Compaq Computer Corporation");
53 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
54 MODULE_LICENSE("GPL");
55
56 #include "cpqarray.h"
57 #include "ida_cmd.h"
58 #include "smart1,2.h"
59 #include "ida_ioctl.h"
60
61 #define READ_AHEAD 128
62 #define NR_CMDS 128 /* This could probably go as high as ~400 */
63
64 #define MAX_CTLR 8
65 #define CTLR_SHIFT 8
66
67 #define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
68
69 static int nr_ctlr;
70 static ctlr_info_t *hba[MAX_CTLR];
71
72 static int eisa[8];
73
74 #define NR_PRODUCTS ARRAY_SIZE(products)
75
76 /* board_id = Subsystem Device ID & Vendor ID
77 * product = Marketing Name for the board
78 * access = Address of the struct of function pointers
79 */
80 static struct board_type products[] = {
81 { 0x0040110E, "IDA", &smart1_access },
82 { 0x0140110E, "IDA-2", &smart1_access },
83 { 0x1040110E, "IAES", &smart1_access },
84 { 0x2040110E, "SMART", &smart1_access },
85 { 0x3040110E, "SMART-2/E", &smart2e_access },
86 { 0x40300E11, "SMART-2/P", &smart2_access },
87 { 0x40310E11, "SMART-2SL", &smart2_access },
88 { 0x40320E11, "Smart Array 3200", &smart2_access },
89 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
90 { 0x40340E11, "Smart Array 221", &smart2_access },
91 { 0x40400E11, "Integrated Array", &smart4_access },
92 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
93 { 0x40500E11, "Smart Array 4200", &smart4_access },
94 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
95 { 0x40580E11, "Smart Array 431", &smart4_access },
96 };
97
98 /* define the PCI info for the PCI cards this driver can control */
99 static const struct pci_device_id cpqarray_pci_device_id[] =
100 {
101 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
102 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
103 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
104 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
105 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
106 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
107 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
108 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
109 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
110 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
111 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
112 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
113 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
114 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
115 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
116 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
117 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
118 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
119 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
120 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
121 { 0 }
122 };
123
124 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
125
126 static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
127
128 /* Debug... */
129 #define DBG(s) do { s } while(0)
130 /* Debug (general info)... */
131 #define DBGINFO(s) do { } while(0)
132 /* Debug Paranoid... */
133 #define DBGP(s) do { } while(0)
134 /* Debug Extra Paranoid... */
135 #define DBGPX(s) do { } while(0)
136
137 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
138 static void __iomem *remap_pci_mem(ulong base, ulong size);
139 static int cpqarray_eisa_detect(void);
140 static int pollcomplete(int ctlr);
141 static void getgeometry(int ctlr);
142 static void start_fwbk(int ctlr);
143
144 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
145 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
146
147 static void free_hba(int i);
148 static int alloc_cpqarray_hba(void);
149
150 static int sendcmd(
151 __u8 cmd,
152 int ctlr,
153 void *buff,
154 size_t size,
155 unsigned int blk,
156 unsigned int blkcnt,
157 unsigned int log_unit );
158
159 static int ida_open(struct inode *inode, struct file *filep);
160 static int ida_release(struct inode *inode, struct file *filep);
161 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
162 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
163 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
164
165 static void do_ida_request(struct request_queue *q);
166 static void start_io(ctlr_info_t *h);
167
168 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
169 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
170 static inline void complete_command(cmdlist_t *cmd, int timeout);
171
172 static irqreturn_t do_ida_intr(int irq, void *dev_id);
173 static void ida_timer(unsigned long tdata);
174 static int ida_revalidate(struct gendisk *disk);
175 static int revalidate_allvol(ctlr_info_t *host);
176 static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
177
178 #ifdef CONFIG_PROC_FS
179 static void ida_procinit(int i);
180 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
181 #else
182 static void ida_procinit(int i) {}
183 #endif
184
185 static inline drv_info_t *get_drv(struct gendisk *disk)
186 {
187 return disk->private_data;
188 }
189
190 static inline ctlr_info_t *get_host(struct gendisk *disk)
191 {
192 return disk->queue->queuedata;
193 }
194
195
196 static struct block_device_operations ida_fops = {
197 .owner = THIS_MODULE,
198 .open = ida_open,
199 .release = ida_release,
200 .ioctl = ida_ioctl,
201 .getgeo = ida_getgeo,
202 .revalidate_disk= ida_revalidate,
203 };
204
205
206 #ifdef CONFIG_PROC_FS
207
208 static struct proc_dir_entry *proc_array;
209
210 /*
211 * Get us a file in /proc/driver/cpqarray that says something about each controller.
212 * Create the /proc/driver/cpqarray directory if it doesn't exist yet.
213 */
214 static void __init ida_procinit(int i)
215 {
216 if (proc_array == NULL) {
217 proc_array = proc_mkdir("driver/cpqarray", NULL);
218 if (!proc_array) return;
219 }
220
221 create_proc_read_entry(hba[i]->devname, 0, proc_array,
222 ida_proc_get_info, hba[i]);
223 }
224
225 /*
226 * Report information about this controller.
227 */
228 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
229 {
230 off_t pos = 0;
231 off_t len = 0;
232 int size, i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)data;
234 drv_info_t *drv;
235 #ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c;
237 unsigned long flags;
238 #endif
239
240 ctlr = h->ctlr;
241 size = sprintf(buffer, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n"
245 " Memory Address: 0x%08lx\n"
246 " I/O Port: 0x%04x\n"
247 " IRQ: %d\n"
248 " Logical drives: %d\n"
249 " Physical drives: %d\n\n"
250 " Current Q depth: %d\n"
251 " Max Q depth since init: %d\n\n",
252 h->devname,
253 h->product_name,
254 (unsigned long)h->board_id,
255 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
256 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
257 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
258 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit);
260
261 pos += size; len += size;
262
263 size = sprintf(buffer+len, "Logical Drive Info:\n");
264 pos += size; len += size;
265
266 for(i=0; i<h->log_drives; i++) {
267 drv = &h->drv[i];
268 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
269 ctlr, i, drv->blk_size, drv->nr_blks);
270 pos += size; len += size;
271 }
272
273 #ifdef CPQ_PROC_PRINT_QUEUES
274 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
275 size = sprintf(buffer+len, "\nCurrent Queues:\n");
276 pos += size; len += size;
277
278 c = h->reqQ;
279 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
280 if (c) c=c->next;
281 while(c && c != h->reqQ) {
282 size = sprintf(buffer+len, "->%p", c);
283 pos += size; len += size;
284 c=c->next;
285 }
286
287 c = h->cmpQ;
288 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
289 if (c) c=c->next;
290 while(c && c != h->cmpQ) {
291 size = sprintf(buffer+len, "->%p", c);
292 pos += size; len += size;
293 c=c->next;
294 }
295
296 size = sprintf(buffer+len, "\n"); pos += size; len += size;
297 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
298 #endif
299 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
300 h->nr_allocs, h->nr_frees);
301 pos += size; len += size;
302
303 *eof = 1;
304 *start = buffer+offset;
305 len -= offset;
306 if (len>length)
307 len = length;
308 return len;
309 }
310 #endif /* CONFIG_PROC_FS */
311
312 module_param_array(eisa, int, NULL, 0);
313
314 static void release_io_mem(ctlr_info_t *c)
315 {
316 /* if the I/O region was never reserved, do nothing */
317 if( c->io_mem_addr == 0)
318 return;
319 release_region(c->io_mem_addr, c->io_mem_length);
320 c->io_mem_addr = 0;
321 c->io_mem_length = 0;
322 }
323
324 static void __devexit cpqarray_remove_one(int i)
325 {
326 int j;
327 char buff[4];
328
329 /* sendcmd will turn off interrupts and send the flush
330 * to write all data in the battery-backed cache to disk.
331 * No data is returned, but we don't want to pass NULL to sendcmd. */
332 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
333 {
334 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
335 i);
336 }
337 free_irq(hba[i]->intr, hba[i]);
338 iounmap(hba[i]->vaddr);
339 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
340 del_timer(&hba[i]->timer);
341 remove_proc_entry(hba[i]->devname, proc_array);
342 pci_free_consistent(hba[i]->pci_dev,
343 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
344 hba[i]->cmd_pool_dhandle);
345 kfree(hba[i]->cmd_pool_bits);
346 for(j = 0; j < NWD; j++) {
347 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
348 del_gendisk(ida_gendisk[i][j]);
349 put_disk(ida_gendisk[i][j]);
350 }
351 blk_cleanup_queue(hba[i]->queue);
352 release_io_mem(hba[i]);
353 free_hba(i);
354 }
355
356 static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
357 {
358 int i;
359 ctlr_info_t *tmp_ptr;
360
361 if (pci_get_drvdata(pdev) == NULL) {
362 printk(KERN_ERR "cpqarray: Unable to remove device\n");
363 return;
364 }
365
366 tmp_ptr = pci_get_drvdata(pdev);
367 i = tmp_ptr->ctlr;
368 if (hba[i] == NULL) {
369 printk(KERN_ERR "cpqarray: controller %d appears to have"
370 "already been removed \n", i);
371 return;
372 }
373 pci_set_drvdata(pdev, NULL);
374
375 cpqarray_remove_one(i);
376 }
377
378 /* Removing an instance that was not removed automatically;
379 * it must be an EISA card.
380 */
381 static void __devexit cpqarray_remove_one_eisa (int i)
382 {
383 if (hba[i] == NULL) {
384 printk(KERN_ERR "cpqarray: controller %d appears to have"
385 "already been removed \n", i);
386 return;
387 }
388 cpqarray_remove_one(i);
389 }
390
391 /* pdev is NULL for eisa */
392 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
393 {
394 struct request_queue *q;
395 int j;
396
397 /*
398 * register block devices
399 * Find disks and fill in structs
400 * Get an interrupt, set the Q depth and get into /proc
401 */
402
403 /* If this is successful it should ensure that we are the only */
404 /* instance of the driver */
405 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
406 goto Enomem4;
407 }
408 hba[i]->access.set_intr_mask(hba[i], 0);
409 if (request_irq(hba[i]->intr, do_ida_intr,
410 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
411 {
412 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
413 hba[i]->intr, hba[i]->devname);
414 goto Enomem3;
415 }
416
417 for (j=0; j<NWD; j++) {
418 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
419 if (!ida_gendisk[i][j])
420 goto Enomem2;
421 }
422
423 hba[i]->cmd_pool = pci_alloc_consistent(
424 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
425 &(hba[i]->cmd_pool_dhandle));
426 hba[i]->cmd_pool_bits = kcalloc(
427 DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
428 GFP_KERNEL);
429
430 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
431 goto Enomem1;
432
433 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
434 printk(KERN_INFO "cpqarray: Finding drives on %s",
435 hba[i]->devname);
436
437 spin_lock_init(&hba[i]->lock);
438 q = blk_init_queue(do_ida_request, &hba[i]->lock);
439 if (!q)
440 goto Enomem1;
441
442 hba[i]->queue = q;
443 q->queuedata = hba[i];
444
445 getgeometry(i);
446 start_fwbk(i);
447
448 ida_procinit(i);
449
450 if (pdev)
451 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
452
453 /* This is a hardware imposed limit. */
454 blk_queue_max_hw_segments(q, SG_MAX);
455
456 /* This is a driver limit and could be eliminated. */
457 blk_queue_max_phys_segments(q, SG_MAX);
458
459 init_timer(&hba[i]->timer);
460 hba[i]->timer.expires = jiffies + IDA_TIMER;
461 hba[i]->timer.data = (unsigned long)hba[i];
462 hba[i]->timer.function = ida_timer;
463 add_timer(&hba[i]->timer);
464
465 /* Enable IRQ now that spinlock and rate limit timer are set up */
466 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
467
468 for(j=0; j<NWD; j++) {
469 struct gendisk *disk = ida_gendisk[i][j];
470 drv_info_t *drv = &hba[i]->drv[j];
471 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
472 disk->major = COMPAQ_SMART2_MAJOR + i;
473 disk->first_minor = j<<NWD_SHIFT;
474 disk->fops = &ida_fops;
475 if (j && !drv->nr_blks)
476 continue;
477 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
478 set_capacity(disk, drv->nr_blks);
479 disk->queue = hba[i]->queue;
480 disk->private_data = drv;
481 add_disk(disk);
482 }
483
484 /* done ! */
485 return(i);
486
487 Enomem1:
488 nr_ctlr = i;
489 kfree(hba[i]->cmd_pool_bits);
490 if (hba[i]->cmd_pool)
491 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
492 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
493 Enomem2:
494 while (j--) {
495 put_disk(ida_gendisk[i][j]);
496 ida_gendisk[i][j] = NULL;
497 }
498 free_irq(hba[i]->intr, hba[i]);
499 Enomem3:
500 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
501 Enomem4:
502 if (pdev)
503 pci_set_drvdata(pdev, NULL);
504 release_io_mem(hba[i]);
505 free_hba(i);
506
507 printk(KERN_ERR "cpqarray: out of memory\n");
508
509 return -1;
510 }
511
512 static int __init cpqarray_init_one( struct pci_dev *pdev,
513 const struct pci_device_id *ent)
514 {
515 int i;
516
517 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
518 " bus %d dev %d func %d\n",
519 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
520 PCI_FUNC(pdev->devfn));
521 i = alloc_cpqarray_hba();
522 if( i < 0 )
523 return (-1);
524 memset(hba[i], 0, sizeof(ctlr_info_t));
525 sprintf(hba[i]->devname, "ida%d", i);
526 hba[i]->ctlr = i;
527 /* Initialize the pdev driver private data */
528 pci_set_drvdata(pdev, hba[i]);
529
530 if (cpqarray_pci_init(hba[i], pdev) != 0) {
531 pci_set_drvdata(pdev, NULL);
532 release_io_mem(hba[i]);
533 free_hba(i);
534 return -1;
535 }
536
537 return (cpqarray_register_ctlr(i, pdev));
538 }
539
540 static struct pci_driver cpqarray_pci_driver = {
541 .name = "cpqarray",
542 .probe = cpqarray_init_one,
543 .remove = __devexit_p(cpqarray_remove_one_pci),
544 .id_table = cpqarray_pci_device_id,
545 };
546
547 /*
548 * This is it. Find all the controllers and register them.
549 * Returns the number of controllers registered.
550 */
551 static int __init cpqarray_init(void)
552 {
553 int num_cntlrs_reg = 0;
554 int i;
555 int rc = 0;
556
557 /* detect controllers */
558 printk(DRIVER_NAME "\n");
559
560 rc = pci_register_driver(&cpqarray_pci_driver);
561 if (rc)
562 return rc;
563 cpqarray_eisa_detect();
564
565 for (i=0; i < MAX_CTLR; i++) {
566 if (hba[i] != NULL)
567 num_cntlrs_reg++;
568 }
569
570 return(num_cntlrs_reg);
571 }
572
573 /* Function to find the first free pointer into our hba[] array */
574 /* Returns -1 if no free entries are left. */
575 static int alloc_cpqarray_hba(void)
576 {
577 int i;
578
579 for(i=0; i< MAX_CTLR; i++) {
580 if (hba[i] == NULL) {
581 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
582 if(hba[i]==NULL) {
583 printk(KERN_ERR "cpqarray: out of memory.\n");
584 return (-1);
585 }
586 return (i);
587 }
588 }
589 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
590 " of 8 controllers.\n");
591 return(-1);
592 }
593
594 static void free_hba(int i)
595 {
596 kfree(hba[i]);
597 hba[i]=NULL;
598 }
599
600 /*
601 * Find the IO address of the controller, its IRQ and so forth. Fill
602 * in some basic stuff into the ctlr_info_t structure.
603 */
604 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
605 {
606 ushort vendor_id, device_id, command;
607 unchar cache_line_size, latency_timer;
608 unchar irq, revision;
609 unsigned long addr[6];
610 __u32 board_id;
611
612 int i;
613
614 c->pci_dev = pdev;
615 if (pci_enable_device(pdev)) {
616 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
617 return -1;
618 }
619 vendor_id = pdev->vendor;
620 device_id = pdev->device;
621 irq = pdev->irq;
622
623 for(i=0; i<6; i++)
624 addr[i] = pci_resource_start(pdev, i);
625
626 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
627 {
628 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
629 return -1;
630 }
631
632 pci_read_config_word(pdev, PCI_COMMAND, &command);
633 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
634 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
635 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
636
637 pci_read_config_dword(pdev, 0x2c, &board_id);
638
639 /* check to see if controller has been disabled */
640 if(!(command & 0x02)) {
641 printk(KERN_WARNING
642 "cpqarray: controller appears to be disabled\n");
643 return(-1);
644 }
645
646 DBGINFO(
647 printk("vendor_id = %x\n", vendor_id);
648 printk("device_id = %x\n", device_id);
649 printk("command = %x\n", command);
650 for(i=0; i<6; i++)
651 printk("addr[%d] = %lx\n", i, addr[i]);
652 printk("revision = %x\n", revision);
653 printk("irq = %x\n", irq);
654 printk("cache_line_size = %x\n", cache_line_size);
655 printk("latency_timer = %x\n", latency_timer);
656 printk("board_id = %x\n", board_id);
657 );
658
659 c->intr = irq;
660
661 for(i=0; i<6; i++) {
662 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
663 { /* IO space */
664 c->io_mem_addr = addr[i];
665 c->io_mem_length = pci_resource_end(pdev, i)
666 - pci_resource_start(pdev, i) + 1;
667 if(!request_region( c->io_mem_addr, c->io_mem_length,
668 "cpqarray"))
669 {
670 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
671 c->io_mem_addr = 0;
672 c->io_mem_length = 0;
673 }
674 break;
675 }
676 }
677
678 c->paddr = 0;
679 for(i=0; i<6; i++)
680 if (!(pci_resource_flags(pdev, i) &
681 PCI_BASE_ADDRESS_SPACE_IO)) {
682 c->paddr = pci_resource_start (pdev, i);
683 break;
684 }
685 if (!c->paddr)
686 return -1;
687 c->vaddr = remap_pci_mem(c->paddr, 128);
688 if (!c->vaddr)
689 return -1;
690 c->board_id = board_id;
691
692 for(i=0; i<NR_PRODUCTS; i++) {
693 if (board_id == products[i].board_id) {
694 c->product_name = products[i].product_name;
695 c->access = *(products[i].access);
696 break;
697 }
698 }
699 if (i == NR_PRODUCTS) {
700 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
701 " to access the SMART Array controller %08lx\n",
702 (unsigned long)board_id);
703 return -1;
704 }
705
706 return 0;
707 }
708
709 /*
710 * Map (physical) PCI mem into (virtual) kernel space
711 */
712 static void __iomem *remap_pci_mem(ulong base, ulong size)
713 {
714 ulong page_base = ((ulong) base) & PAGE_MASK;
715 ulong page_offs = ((ulong) base) - page_base;
716 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
717
718 return (page_remapped ? (page_remapped + page_offs) : NULL);
719 }
720
721 #ifndef MODULE
722 /*
723 * Config string is a comma separated set of i/o addresses of EISA cards.
724 */
725 static int cpqarray_setup(char *str)
726 {
727 int i, ints[9];
728
729 (void)get_options(str, ARRAY_SIZE(ints), ints);
730
731 for(i=0; i<ints[0] && i<8; i++)
732 eisa[i] = ints[i+1];
733 return 1;
734 }
735
736 __setup("smart2=", cpqarray_setup);
737
738 #endif
739
740 /*
741 * Find an EISA controller's signature. Set up an hba if we find it.
742 */
743 static int __init cpqarray_eisa_detect(void)
744 {
745 int i=0, j;
746 __u32 board_id;
747 int intr;
748 int ctlr;
749 int num_ctlr = 0;
750
751 while(i<8 && eisa[i]) {
752 ctlr = alloc_cpqarray_hba();
753 if(ctlr == -1)
754 break;
755 board_id = inl(eisa[i]+0xC80);
756 for(j=0; j < NR_PRODUCTS; j++)
757 if (board_id == products[j].board_id)
758 break;
759
760 if (j == NR_PRODUCTS) {
761 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
762 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
763 continue;
764 }
765
766 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
767 hba[ctlr]->io_mem_addr = eisa[i];
768 hba[ctlr]->io_mem_length = 0x7FF;
769 if(!request_region(hba[ctlr]->io_mem_addr,
770 hba[ctlr]->io_mem_length,
771 "cpqarray"))
772 {
773 printk(KERN_WARNING "cpqarray: I/O range already in "
774 "use addr = %lx length = %ld\n",
775 hba[ctlr]->io_mem_addr,
776 hba[ctlr]->io_mem_length);
777 free_hba(ctlr);
778 continue;
779 }
780
781 /*
782 * Read the config register to find our interrupt
783 */
784 intr = inb(eisa[i]+0xCC0) >> 4;
785 if (intr & 1) intr = 11;
786 else if (intr & 2) intr = 10;
787 else if (intr & 4) intr = 14;
788 else if (intr & 8) intr = 15;
789
790 hba[ctlr]->intr = intr;
791 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
792 hba[ctlr]->product_name = products[j].product_name;
793 hba[ctlr]->access = *(products[j].access);
794 hba[ctlr]->ctlr = ctlr;
795 hba[ctlr]->board_id = board_id;
796 hba[ctlr]->pci_dev = NULL; /* not PCI */
797
798 DBGINFO(
799 printk("i = %d, j = %d\n", i, j);
800 printk("irq = %x\n", intr);
801 printk("product name = %s\n", products[j].product_name);
802 printk("board_id = %x\n", board_id);
803 );
804
805 num_ctlr++;
806 i++;
807
808 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
809 printk(KERN_WARNING
810 "cpqarray: Can't register EISA controller %d\n",
811 ctlr);
812
813 }
814
815 return num_ctlr;
816 }
817
818 /*
819 * Open. Make sure the device is really there.
820 */
821 static int ida_open(struct inode *inode, struct file *filep)
822 {
823 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
824 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
825
826 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
827 /*
828 * Root is allowed to open raw volume zero even if it's not configured
829 * so array config can still work. I don't think I really like this,
830 * but I'm already using way too many device nodes to claim another one
831 * for "raw controller".
832 */
833 if (!drv->nr_blks) {
834 if (!capable(CAP_SYS_RAWIO))
835 return -ENXIO;
836 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
837 return -ENXIO;
838 }
839 host->usage_count++;
840 return 0;
841 }
842
843 /*
844 * Close. Sync first.
845 */
846 static int ida_release(struct inode *inode, struct file *filep)
847 {
848 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
849 host->usage_count--;
850 return 0;
851 }
852
853 /*
854 * Enqueuing and dequeuing functions for cmdlists.
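* The queues are circular, doubly linked lists threaded through each command's next/prev pointers.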
855 */
856 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
857 {
858 if (*Qptr == NULL) {
859 *Qptr = c;
860 c->next = c->prev = c;
861 } else {
862 c->prev = (*Qptr)->prev;
863 c->next = (*Qptr);
864 (*Qptr)->prev->next = c;
865 (*Qptr)->prev = c;
866 }
867 }
868
869 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
870 {
871 if (c && c->next != c) {
872 if (*Qptr == c) *Qptr = c->next;
873 c->prev->next = c->next;
874 c->next->prev = c->prev;
875 } else {
876 *Qptr = NULL;
877 }
878 return c;
879 }
880
881 /*
882 * Get a request and submit it to the controller.
883 * This routine needs to grab all the requests it possibly can from the
884 * req Q and submit them. Interrupts are off (and need to be off) when you
885 * are in here (either via the dummy do_ida_request functions or by being
886 * called from the interrupt handler).
887 */
888 static void do_ida_request(struct request_queue *q)
889 {
890 ctlr_info_t *h = q->queuedata;
891 cmdlist_t *c;
892 struct request *creq;
893 struct scatterlist tmp_sg[SG_MAX];
894 int i, dir, seg;
895
896 if (blk_queue_plugged(q))
897 goto startio;
898
899 queue_next:
900 creq = elv_next_request(q);
901 if (!creq)
902 goto startio;
903
904 BUG_ON(creq->nr_phys_segments > SG_MAX);
905
906 if ((c = cmd_alloc(h,1)) == NULL)
907 goto startio;
908
909 blkdev_dequeue_request(creq);
910
911 c->ctlr = h->ctlr;
912 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
913 c->hdr.size = sizeof(rblk_t) >> 2;
914 c->size += sizeof(rblk_t);
915
916 c->req.hdr.blk = creq->sector;
917 c->rq = creq;
918 DBGPX(
919 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
920 );
921 sg_init_table(tmp_sg, SG_MAX);
922 seg = blk_rq_map_sg(q, creq, tmp_sg);
923
924 /* Now do all the DMA Mappings */
925 if (rq_data_dir(creq) == READ)
926 dir = PCI_DMA_FROMDEVICE;
927 else
928 dir = PCI_DMA_TODEVICE;
929 for( i=0; i < seg; i++)
930 {
931 c->req.sg[i].size = tmp_sg[i].length;
932 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
933 sg_page(&tmp_sg[i]),
934 tmp_sg[i].offset,
935 tmp_sg[i].length, dir);
936 }
937 DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
938 c->req.hdr.sg_cnt = seg;
939 c->req.hdr.blk_cnt = creq->nr_sectors;
940 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
941 c->type = CMD_RWREQ;
942
943 /* Put the request on the tail of the request queue */
944 addQ(&h->reqQ, c);
945 h->Qdepth++;
946 if (h->Qdepth > h->maxQsinceinit)
947 h->maxQsinceinit = h->Qdepth;
948
949 goto queue_next;
950
951 startio:
952 start_io(h);
953 }
954
955 /*
956 * start_io submits everything on a controller's request queue
957 * and moves it to the completion queue.
958 *
959 * Interrupts had better be off if you're in here
960 */
961 static void start_io(ctlr_info_t *h)
962 {
963 cmdlist_t *c;
964
965 while((c = h->reqQ) != NULL) {
966 /* Can't do anything if we're busy */
967 if (h->access.fifo_full(h) == 0)
968 return;
969
970 /* Get the first entry from the request Q */
971 removeQ(&h->reqQ, c);
972 h->Qdepth--;
973
974 /* Tell the controller to do our bidding */
975 h->access.submit_command(h, c);
976
977 /* Get onto the completion Q */
978 addQ(&h->cmpQ, c);
979 }
980 }
981
982 /*
983 * Mark all buffers that cmd was responsible for as completed.
984 */
985 static inline void complete_command(cmdlist_t *cmd, int timeout)
986 {
987 struct request *rq = cmd->rq;
988 int error = 0;
989 int i, ddir;
990
991 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
992 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
993 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
994 cmd->ctlr, cmd->hdr.unit);
995 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
996 }
997 if (cmd->req.hdr.rcode & RCODE_FATAL) {
998 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
999 cmd->ctlr, cmd->hdr.unit);
1000 error = -EIO;
1001 }
1002 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1003 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1004 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1005 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1006 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1007 error = -EIO;
1008 }
1009 if (timeout)
1010 error = -EIO;
1011 /* unmap the DMA mapping for all the scatter gather elements */
1012 if (cmd->req.hdr.cmd == IDA_READ)
1013 ddir = PCI_DMA_FROMDEVICE;
1014 else
1015 ddir = PCI_DMA_TODEVICE;
1016 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1017 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1018 cmd->req.sg[i].size, ddir);
1019
1020 DBGPX(printk("Done with %p\n", rq););
1021 if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
1022 BUG();
1023 }
1024
1025 /*
1026 * The controller will interrupt us upon completion of commands.
1027 * Find the command on the completion queue, remove it, tell the OS and
1028 * try to queue up more IO
1029 */
1030 static irqreturn_t do_ida_intr(int irq, void *dev_id)
1031 {
1032 ctlr_info_t *h = dev_id;
1033 cmdlist_t *c;
1034 unsigned long istat;
1035 unsigned long flags;
1036 __u32 a,a1;
1037
1038 istat = h->access.intr_pending(h);
1039 /* Is this interrupt for us? */
1040 if (istat == 0)
1041 return IRQ_NONE;
1042
1043 /*
1044 * If there are completed commands in the completion queue,
1045 * we had better do something about it.
1046 */
1047 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1048 if (istat & FIFO_NOT_EMPTY) {
1049 while((a = h->access.command_completed(h))) {
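/* The completion tag is the command's bus address with status in the low two bits; mask them off to find the matching command. */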
1050 a1 = a; a &= ~3;
1051 if ((c = h->cmpQ) == NULL)
1052 {
1053 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1054 continue;
1055 }
1056 while(c->busaddr != a) {
1057 c = c->next;
1058 if (c == h->cmpQ)
1059 break;
1060 }
1061 /*
1062 * If we've found the command, take it off the
1063 * completion Q and free it
1064 */
1065 if (c->busaddr == a) {
1066 removeQ(&h->cmpQ, c);
1067 /* Check for an invalid command:
1068 * the controller returns a command error,
1069 * but rcode == 0.
1070 */
1071
1072 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1073 {
1074 c->req.hdr.rcode = RCODE_INVREQ;
1075 }
1076 if (c->type == CMD_RWREQ) {
1077 complete_command(c, 0);
1078 cmd_free(h, c, 1);
1079 } else if (c->type == CMD_IOCTL_PEND) {
1080 c->type = CMD_IOCTL_DONE;
1081 }
1082 continue;
1083 }
1084 }
1085 }
1086
1087 /*
1088 * See if we can queue up some more IO
1089 */
1090 do_ida_request(h->queue);
1091 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1092 return IRQ_HANDLED;
1093 }
1094
1095 /*
1096 * This timer was originally for timing out requests that hadn't completed
1097 * within IDA_TIMEOUT. That wasn't such a good idea. Now the timer is used to
1098 * reset a flags structure so we don't flood the user with
1099 * "Non-Fatal error" messages.
1100 */
1101 static void ida_timer(unsigned long tdata)
1102 {
1103 ctlr_info_t *h = (ctlr_info_t*)tdata;
1104
1105 h->timer.expires = jiffies + IDA_TIMER;
1106 add_timer(&h->timer);
1107 h->misc_tflags = 0;
1108 }
1109
1110 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1111 {
1112 drv_info_t *drv = get_drv(bdev->bd_disk);
1113
1114 if (drv->cylinders) {
1115 geo->heads = drv->heads;
1116 geo->sectors = drv->sectors;
1117 geo->cylinders = drv->cylinders;
1118 } else {
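/* No geometry reported by the controller: fake 255 heads x 63 sectors and derive cylinders from the block count. */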
1119 geo->heads = 0xff;
1120 geo->sectors = 0x3f;
1121 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1122 }
1123
1124 return 0;
1125 }
1126
1127 /*
1128 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1129 * setting readahead and submitting commands from userspace to the controller.
1130 */
1131 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1132 {
1133 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1134 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1135 int error;
1136 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1137 ida_ioctl_t *my_io;
1138
1139 switch(cmd) {
1140 case IDAGETDRVINFO:
1141 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1142 return -EFAULT;
1143 return 0;
1144 case IDAPASSTHRU:
1145 if (!capable(CAP_SYS_RAWIO))
1146 return -EPERM;
1147 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1148 if (!my_io)
1149 return -ENOMEM;
1150 error = -EFAULT;
1151 if (copy_from_user(my_io, io, sizeof(*my_io)))
1152 goto out_passthru;
1153 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1154 if (error)
1155 goto out_passthru;
1156 error = -EFAULT;
1157 if (copy_to_user(io, my_io, sizeof(*my_io)))
1158 goto out_passthru;
1159 error = 0;
1160 out_passthru:
1161 kfree(my_io);
1162 return error;
1163 case IDAGETCTLRSIG:
1164 if (!arg) return -EINVAL;
1165 put_user(host->ctlr_sig, (int __user *)arg);
1166 return 0;
1167 case IDAREVALIDATEVOLS:
1168 if (iminor(inode) != 0)
1169 return -ENXIO;
1170 return revalidate_allvol(host);
1171 case IDADRIVERVERSION:
1172 if (!arg) return -EINVAL;
1173 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1174 return 0;
1175 case IDAGETPCIINFO:
1176 {
1177
1178 ida_pci_info_struct pciinfo;
1179
1180 if (!arg) return -EINVAL;
1181 pciinfo.bus = host->pci_dev->bus->number;
1182 pciinfo.dev_fn = host->pci_dev->devfn;
1183 pciinfo.board_id = host->board_id;
1184 if(copy_to_user((void __user *) arg, &pciinfo,
1185 sizeof( ida_pci_info_struct)))
1186 return -EFAULT;
1187 return(0);
1188 }
1189
1190 default:
1191 return -EINVAL;
1192 }
1193
1194 }
1195 /*
1196 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1197 * The command block (io) has already been copied to kernel space for us,
1198 * however, any elements in the sglist need to be copied to kernel space
1199 * or copied back to userspace.
1200 *
1201 * Only root may perform a controller passthru command; however, I'm not doing
1202 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1203 * putting a 64M buffer in the sglist is probably a *bad* idea.
1204 */
1205 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1206 {
1207 int ctlr = h->ctlr;
1208 cmdlist_t *c;
1209 void *p = NULL;
1210 unsigned long flags;
1211 int error;
1212
1213 if ((c = cmd_alloc(h, 0)) == NULL)
1214 return -ENOMEM;
1215 c->ctlr = ctlr;
1216 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1217 c->hdr.size = sizeof(rblk_t) >> 2;
1218 c->size += sizeof(rblk_t);
1219
1220 c->req.hdr.cmd = io->cmd;
1221 c->req.hdr.blk = io->blk;
1222 c->req.hdr.blk_cnt = io->blk_cnt;
1223 c->type = CMD_IOCTL_PEND;
1224
1225 /* Pre submit processing */
1226 switch(io->cmd) {
1227 case PASSTHRU_A:
1228 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1229 if (!p)
1230 {
1231 error = -ENOMEM;
1232 cmd_free(h, c, 0);
1233 return(error);
1234 }
1235 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1236 kfree(p);
1237 cmd_free(h, c, 0);
1238 return -EFAULT;
1239 }
1240 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1241 sizeof(ida_ioctl_t),
1242 PCI_DMA_BIDIRECTIONAL);
1243 c->req.sg[0].size = io->sg[0].size;
1244 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1245 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1246 c->req.hdr.sg_cnt = 1;
1247 break;
1248 case IDA_READ:
1249 case READ_FLASH_ROM:
1250 case SENSE_CONTROLLER_PERFORMANCE:
1251 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1252 if (!p)
1253 {
1254 error = -ENOMEM;
1255 cmd_free(h, c, 0);
1256 return(error);
1257 }
1258
1259 c->req.sg[0].size = io->sg[0].size;
1260 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1261 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1262 c->req.hdr.sg_cnt = 1;
1263 break;
1264 case IDA_WRITE:
1265 case IDA_WRITE_MEDIA:
1266 case DIAG_PASS_THRU:
1267 case COLLECT_BUFFER:
1268 case WRITE_FLASH_ROM:
1269 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1270 if (!p)
1271 {
1272 error = -ENOMEM;
1273 cmd_free(h, c, 0);
1274 return(error);
1275 }
1276 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1277 kfree(p);
1278 cmd_free(h, c, 0);
1279 return -EFAULT;
1280 }
1281 c->req.sg[0].size = io->sg[0].size;
1282 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1283 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1284 c->req.hdr.sg_cnt = 1;
1285 break;
1286 default:
1287 c->req.sg[0].size = sizeof(io->c);
1288 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1289 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1290 c->req.hdr.sg_cnt = 1;
1291 }
1292
1293 /* Put the request on the tail of the request queue */
1294 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1295 addQ(&h->reqQ, c);
1296 h->Qdepth++;
1297 start_io(h);
1298 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1299
1300 /* Wait for completion */
1301 while(c->type != CMD_IOCTL_DONE)
1302 schedule();
1303
1304 /* Unmap the DMA */
1305 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1306 PCI_DMA_BIDIRECTIONAL);
1307 /* Post submit processing */
1308 switch(io->cmd) {
1309 case PASSTHRU_A:
1310 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1311 sizeof(ida_ioctl_t),
1312 PCI_DMA_BIDIRECTIONAL);
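/* fall through: the PASSTHRU_A data buffer is also copied back to userspace below */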
1313 case IDA_READ:
1314 case DIAG_PASS_THRU:
1315 case SENSE_CONTROLLER_PERFORMANCE:
1316 case READ_FLASH_ROM:
1317 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1318 kfree(p);
1319 return -EFAULT;
1320 }
1321 /* fall through and free p */
1322 case IDA_WRITE:
1323 case IDA_WRITE_MEDIA:
1324 case COLLECT_BUFFER:
1325 case WRITE_FLASH_ROM:
1326 kfree(p);
1327 break;
1328 default:;
1329 /* Nothing to do */
1330 }
1331
1332 io->rcode = c->req.hdr.rcode;
1333 cmd_free(h, c, 0);
1334 return(0);
1335 }
1336
1337 /*
1338 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1339 * scheme to suballocate them to the driver. Operations that are not time
1340 * critical (and can sleep waiting for a DMA-consistent allocation) can pass
1341 * 0 for get_from_pool to get a freshly allocated command.
1342 */
1343 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1344 {
1345 cmdlist_t * c;
1346 int i;
1347 dma_addr_t cmd_dhandle;
1348
1349 if (!get_from_pool) {
1350 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1351 sizeof(cmdlist_t), &cmd_dhandle);
1352 if(c==NULL)
1353 return NULL;
1354 } else {
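/* Scan the bitmap for a free slot; retry if another context claims it between the scan and the atomic test_and_set_bit. */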
1355 do {
1356 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1357 if (i == NR_CMDS)
1358 return NULL;
1359 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1360 c = h->cmd_pool + i;
1361 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1362 h->nr_allocs++;
1363 }
1364
1365 memset(c, 0, sizeof(cmdlist_t));
1366 c->busaddr = cmd_dhandle;
1367 return c;
1368 }
1369
1370 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1371 {
1372 int i;
1373
1374 if (!got_from_pool) {
1375 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1376 c->busaddr);
1377 } else {
1378 i = c - h->cmd_pool;
1379 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1380 h->nr_frees++;
1381 }
1382 }
1383
1384 /***********************************************************************
1385 name: sendcmd
1386 Send a command to an IDA using the memory mapped FIFO interface
1387 and wait for it to complete.
1388 This routine should only be called at init time.
1389 ***********************************************************************/
1390 static int sendcmd(
1391 __u8 cmd,
1392 int ctlr,
1393 void *buff,
1394 size_t size,
1395 unsigned int blk,
1396 unsigned int blkcnt,
1397 unsigned int log_unit )
1398 {
1399 cmdlist_t *c;
1400 int complete;
1401 unsigned long temp;
1402 unsigned long i;
1403 ctlr_info_t *info_p = hba[ctlr];
1404
1405 c = cmd_alloc(info_p, 1);
1406 if(!c)
1407 return IO_ERROR;
1408 c->ctlr = ctlr;
1409 c->hdr.unit = log_unit;
1410 c->hdr.prio = 0;
1411 c->hdr.size = sizeof(rblk_t) >> 2;
1412 c->size += sizeof(rblk_t);
1413
1414 /* The request information. */
1415 c->req.hdr.next = 0;
1416 c->req.hdr.rcode = 0;
1417 c->req.bp = 0;
1418 c->req.hdr.sg_cnt = 1;
1419 c->req.hdr.reserved = 0;
1420
1421 if (size == 0)
1422 c->req.sg[0].size = 512;
1423 else
1424 c->req.sg[0].size = size;
1425
1426 c->req.hdr.blk = blk;
1427 c->req.hdr.blk_cnt = blkcnt;
1428 c->req.hdr.cmd = (unsigned char) cmd;
1429 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1430 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1431 /*
1432 * Disable interrupt
1433 */
1434 info_p->access.set_intr_mask(info_p, 0);
1435 /* Make sure there is room in the command FIFO */
1436 /* Actually it should be completely empty at this time. */
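/* Poll for up to ~2 seconds (200000 iterations x 10us) for FIFO space. */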
1437 for (i = 200000; i > 0; i--) {
1438 temp = info_p->access.fifo_full(info_p);
1439 if (temp != 0) {
1440 break;
1441 }
1442 udelay(10);
1443 DBG(
1444 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1445 " waiting!\n", ctlr);
1446 );
1447 }
1448 /*
1449 * Send the cmd
1450 */
1451 info_p->access.submit_command(info_p, c);
1452 complete = pollcomplete(ctlr);
1453
1454 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1455 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1456 if (complete != 1) {
1457 if (complete != c->busaddr) {
1458 printk( KERN_WARNING
1459 "cpqarray ida%d: idaSendPciCmd "
1460 "Invalid command list address returned! (%08lx)\n",
1461 ctlr, (unsigned long)complete);
1462 cmd_free(info_p, c, 1);
1463 return (IO_ERROR);
1464 }
1465 } else {
1466 printk( KERN_WARNING
1467 "cpqarray ida%d: idaSendPciCmd Timeout out, "
1468 "No command list address returned!\n",
1469 ctlr);
1470 cmd_free(info_p, c, 1);
1471 return (IO_ERROR);
1472 }
1473
1474 if (c->req.hdr.rcode & 0x00FE) {
1475 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1476 printk( KERN_WARNING
1477 "cpqarray ida%d: idaSendPciCmd, error: "
1478 "Controller failed at init time "
1479 "cmd: 0x%x, return code = 0x%x\n",
1480 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1481
1482 cmd_free(info_p, c, 1);
1483 return (IO_ERROR);
1484 }
1485 }
1486 cmd_free(info_p, c, 1);
1487 return (IO_OK);
1488 }
1489
1490 /*
1491 * revalidate_allvol is for online array config utilities. After a
1492 * utility reconfigures the drives in the array, it can use this function
1493 * (through an ioctl) to make the driver zap any previous disk structs for
1494 * that controller and get new ones.
1495 *
1496 * Right now I'm using the getgeometry() function to do this, but this
1497 * function should probably be finer grained and allow you to revalidate one
1498 * particular logical volume (instead of all of them on a particular
1499 * controller).
1500 */
1501 static int revalidate_allvol(ctlr_info_t *host)
1502 {
1503 int ctlr = host->ctlr;
1504 int i;
1505 unsigned long flags;
1506
1507 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1508 if (host->usage_count > 1) {
1509 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1510 printk(KERN_WARNING "cpqarray: Device busy for volume"
1511 " revalidation (usage=%d)\n", host->usage_count);
1512 return -EBUSY;
1513 }
1514 host->usage_count++;
1515 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1516
1517 /*
1518 * Set the partition and block size structures for all volumes
1519 * on this controller to zero. We will reread all of this data
1520 */
1521 set_capacity(ida_gendisk[ctlr][0], 0);
1522 for (i = 1; i < NWD; i++) {
1523 struct gendisk *disk = ida_gendisk[ctlr][i];
1524 if (disk->flags & GENHD_FL_UP)
1525 del_gendisk(disk);
1526 }
1527 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1528
1529 /*
1530 * Tell the array controller not to give us any interrupts while
1531 * we check the new geometry. Then turn interrupts back on when
1532 * we're done.
1533 */
1534 host->access.set_intr_mask(host, 0);
1535 getgeometry(ctlr);
1536 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1537
1538 for(i=0; i<NWD; i++) {
1539 struct gendisk *disk = ida_gendisk[ctlr][i];
1540 drv_info_t *drv = &host->drv[i];
1541 if (i && !drv->nr_blks)
1542 continue;
1543 blk_queue_hardsect_size(host->queue, drv->blk_size);
1544 set_capacity(disk, drv->nr_blks);
1545 disk->queue = host->queue;
1546 disk->private_data = drv;
1547 if (i)
1548 add_disk(disk);
1549 }
1550
1551 host->usage_count--;
1552 return 0;
1553 }
1554
1555 static int ida_revalidate(struct gendisk *disk)
1556 {
1557 drv_info_t *drv = disk->private_data;
1558 set_capacity(disk, drv->nr_blks);
1559 return 0;
1560 }
1561
1562 /********************************************************************
1563 name: pollcomplete
1564 Wait polling for a command to complete.
1565 The memory mapped FIFO is polled for the completion.
1566 Used only at init time, interrupts disabled.
1567 ********************************************************************/
1568 static int pollcomplete(int ctlr)
1569 {
1570 int done;
1571 int i;
1572
1573 /* Wait (up to 2 seconds) for a command to complete */
1574
1575 for (i = 200000; i > 0; i--) {
1576 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1577 if (done == 0) {
1578 udelay(10); /* a short fixed delay */
1579 } else
1580 return (done);
1581 }
1582 /* Invalid address to tell caller we ran out of time */
1583 return 1;
1584 }
1585 /*****************************************************************
1586 start_fwbk
1587 Starts the controller firmware's background processing.
1588 Currently only the Integrated Raid controller needs this done.
1589 If the PCI mem address registers are written to after this,
1590 data corruption may occur.
1591 *****************************************************************/
1592 static void start_fwbk(int ctlr)
1593 {
1594 id_ctlr_t *id_ctlr_buf;
1595 int ret_code;
1596
1597 if( (hba[ctlr]->board_id != 0x40400E11)
1598 && (hba[ctlr]->board_id != 0x40480E11) )
1599
1600 /* Not an Integrated Raid, so there is nothing for us to do */
1601 return;
1602 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1603 " processing\n");
1604 /* The command does not return any data, but sendcmd needs a
1605 buffer */
1606 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1607 if(id_ctlr_buf==NULL)
1608 {
1609 printk(KERN_WARNING "cpqarray: Out of memory. "
1610 "Unable to start background processing.\n");
1611 return;
1612 }
1613 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1614 id_ctlr_buf, 0, 0, 0, 0);
1615 if(ret_code != IO_OK)
1616 printk(KERN_WARNING "cpqarray: Unable to start"
1617 " background processing\n");
1618
1619 kfree(id_ctlr_buf);
1620 }
1621 /*****************************************************************
1622 getgeometry
1623 Get ida logical volume geometry from the controller
1624 This is a large bit of code which once existed in two flavors.
1625 It is used only at init time.
1626 *****************************************************************/
1627 static void getgeometry(int ctlr)
1628 {
1629 id_log_drv_t *id_ldrive;
1630 id_ctlr_t *id_ctlr_buf;
1631 sense_log_drv_stat_t *id_lstatus_buf;
1632 config_t *sense_config_buf;
1633 unsigned int log_unit, log_index;
1634 int ret_code, size;
1635 drv_info_t *drv;
1636 ctlr_info_t *info_p = hba[ctlr];
1637 int i;
1638
1639 info_p->log_drv_map = 0;
1640
1641 id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1642 if (!id_ldrive) {
1643 printk( KERN_ERR "cpqarray: out of memory.\n");
1644 goto err_0;
1645 }
1646
1647 id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1648 if (!id_ctlr_buf) {
1649 printk( KERN_ERR "cpqarray: out of memory.\n");
1650 goto err_1;
1651 }
1652
1653 id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1654 if (!id_lstatus_buf) {
1655 printk( KERN_ERR "cpqarray: out of memory.\n");
1656 goto err_2;
1657 }
1658
1659 sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
1660 if (!sense_config_buf) {
1661 printk( KERN_ERR "cpqarray: out of memory.\n");
1662 goto err_3;
1663 }
1664
1665 info_p->phys_drives = 0;
1666 info_p->log_drv_map = 0;
1667 info_p->drv_assign_map = 0;
1668 info_p->drv_spare_map = 0;
1669 info_p->mp_failed_drv_map = 0; /* only initialized here */
1670 /* Get controllers info for this logical drive */
1671 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1672 if (ret_code == IO_ERROR) {
1673 /*
1674 * If we can't get controller info, set the logical drive map to 0,
1675 * so the idastubopen will fail on all logical drives
1676 * on the controller.
1677 */
1678 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1679 goto err_4;
1680 }
1681
1682 info_p->log_drives = id_ctlr_buf->nr_drvs;
1683 for(i=0;i<4;i++)
1684 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1685 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1686
1687 printk(" (%s)\n", info_p->product_name);
1688 /*
1689 * Initialize logical drive map to zero
1690 */
1691 log_index = 0;
1692 /*
1693 * Get drive geometry for all logical drives
1694 */
1695 if (id_ctlr_buf->nr_drvs > 16)
1696 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1697 "16 logical drives per controller.\n. "
1698 " Additional drives will not be "
1699 "detected\n", ctlr);
1700
1701 for (log_unit = 0;
1702 (log_index < id_ctlr_buf->nr_drvs)
1703 && (log_unit < NWD);
1704 log_unit++) {
1705 size = sizeof(sense_log_drv_stat_t);
1706
1707 /*
1708 Send "Identify logical drive status" cmd
1709 */
1710 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1711 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1712 if (ret_code == IO_ERROR) {
1713 /*
1714 If we can't get logical drive status, set
1715 the logical drive map to 0, so the
1716 idastubopen will fail for all logical drives
1717 on the controller.
1718 */
1719 info_p->log_drv_map = 0;
1720 printk( KERN_WARNING
1721 "cpqarray ida%d: idaGetGeometry - Controller"
1722 " failed to report status of logical drive %d\n"
1723 "Access to this controller has been disabled\n",
1724 ctlr, log_unit);
1725 goto err_4;
1726 }
1727 /*
1728 Make sure the logical drive is configured
1729 */
1730 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1731 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1732 sizeof(id_log_drv_t), 0, 0, log_unit);
1733 /*
1734 If error, the bit for this
1735 logical drive won't be set and
1736 idastubopen will return error.
1737 */
1738 if (ret_code != IO_ERROR) {
1739 drv = &info_p->drv[log_unit];
1740 drv->blk_size = id_ldrive->blk_size;
1741 drv->nr_blks = id_ldrive->nr_blks;
1742 drv->cylinders = id_ldrive->drv.cyl;
1743 drv->heads = id_ldrive->drv.heads;
1744 drv->sectors = id_ldrive->drv.sect_per_track;
1745 info_p->log_drv_map |= (1 << log_unit);
1746
1747 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1748 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1749 ret_code = sendcmd(SENSE_CONFIG,
1750 ctlr, sense_config_buf,
1751 sizeof(config_t), 0, 0, log_unit);
1752 if (ret_code == IO_ERROR) {
1753 info_p->log_drv_map = 0;
1754 printk(KERN_ERR "cpqarray: error sending sense config\n");
1755 goto err_4;
1756 }
1757
1758 info_p->phys_drives =
1759 sense_config_buf->ctlr_phys_drv;
1760 info_p->drv_assign_map
1761 |= sense_config_buf->drv_asgn_map;
1762 info_p->drv_assign_map
1763 |= sense_config_buf->spare_asgn_map;
1764 info_p->drv_spare_map
1765 |= sense_config_buf->spare_asgn_map;
1766 } /* end of if no error on id_ldrive */
1767 log_index = log_index + 1;
1768 } /* end of if logical drive configured */
1769 } /* end of for log_unit */
1770
1771 /* Free all the buffers and return */
1772 err_4:
1773 kfree(sense_config_buf);
1774 err_3:
1775 kfree(id_lstatus_buf);
1776 err_2:
1777 kfree(id_ctlr_buf);
1778 err_1:
1779 kfree(id_ldrive);
1780 err_0:
1781 return;
1782 }
1783
1784 static void __exit cpqarray_exit(void)
1785 {
1786 int i;
1787
1788 pci_unregister_driver(&cpqarray_pci_driver);
1789
1790 /* Double check that all controller entries have been removed */
1791 for(i=0; i<MAX_CTLR; i++) {
1792 if (hba[i] != NULL) {
1793 printk(KERN_WARNING "cpqarray: Removing EISA "
1794 "controller %d\n", i);
1795 cpqarray_remove_one_eisa(i);
1796 }
1797 }
1798
1799 remove_proc_entry("driver/cpqarray", NULL);
1800 }
1801
1802 module_init(cpqarray_init)
1803 module_exit(cpqarray_exit)