drivers/block/cpqarray.c
1 /*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22 #include <linux/config.h> /* CONFIG_PROC_FS */
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/bio.h>
27 #include <linux/interrupt.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/blkdev.h>
40 #include <linux/genhd.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44
45 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
46
47 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
48 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
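/*
 * Worked example (added commentary, not in the original driver text):
 * SMART2_DRIVER_VERSION(2,6,0) evaluates to (2<<16)|(6<<8)|0 = 0x00020600,
 * which is the value handed back to userspace through the IDADRIVERVERSION
 * ioctl further down in this file.
 */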
49
50 /* Embedded module documentation macros - see modules.h */
51 /* Original author Chris Frantz - Compaq Computer Corporation */
52 MODULE_AUTHOR("Compaq Computer Corporation");
53 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
54 MODULE_LICENSE("GPL");
55
56 #include "cpqarray.h"
57 #include "ida_cmd.h"
58 #include "smart1,2.h"
59 #include "ida_ioctl.h"
60
61 #define READ_AHEAD 128
62 #define NR_CMDS 128 /* This could probably go as high as ~400 */
63
64 #define MAX_CTLR 8
65 #define CTLR_SHIFT 8
66
67 #define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
68
69 static int nr_ctlr;
70 static ctlr_info_t *hba[MAX_CTLR];
71
72 static int eisa[8];
73
74 #define NR_PRODUCTS ARRAY_SIZE(products)
75
76 /* board_id = Subsystem Device ID (high 16 bits) and Subsystem Vendor ID (low 16 bits)
77 * product = Marketing Name for the board
78 * access = Address of the struct of function pointers
79 */
80 static struct board_type products[] = {
81 { 0x0040110E, "IDA", &smart1_access },
82 { 0x0140110E, "IDA-2", &smart1_access },
83 { 0x1040110E, "IAES", &smart1_access },
84 { 0x2040110E, "SMART", &smart1_access },
85 { 0x3040110E, "SMART-2/E", &smart2e_access },
86 { 0x40300E11, "SMART-2/P", &smart2_access },
87 { 0x40310E11, "SMART-2SL", &smart2_access },
88 { 0x40320E11, "Smart Array 3200", &smart2_access },
89 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
90 { 0x40340E11, "Smart Array 221", &smart2_access },
91 { 0x40400E11, "Integrated Array", &smart4_access },
92 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
93 { 0x40500E11, "Smart Array 4200", &smart4_access },
94 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
95 { 0x40580E11, "Smart Array 431", &smart4_access },
96 };
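/*
 * Illustrative note (added commentary, not in the original driver text): the
 * board_id values above pack the PCI subsystem IDs into one word.  For
 * example, 0x40580E11 is subsystem device 0x4058 with subsystem vendor
 * 0x0E11 (Compaq), which matches the "SA431" entry in the PCI ID table
 * below and the "Smart Array 431" row above.
 */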
97
98 /* define the PCI info for the PCI cards this driver can control */
99 static const struct pci_device_id cpqarray_pci_device_id[] =
100 {
101 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
102 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
103 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
104 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
105 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
106 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
107 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
108 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
109 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
110 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
111 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
112 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
113 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
114 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
115 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
116 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
117 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
118 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
119 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
120 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
121 { 0 }
122 };
123
124 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
125
126 static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
127
128 /* Debug... */
129 #define DBG(s) do { s } while(0)
130 /* Debug (general info)... */
131 #define DBGINFO(s) do { } while(0)
132 /* Debug Paranoid... */
133 #define DBGP(s) do { } while(0)
134 /* Debug Extra Paranoid... */
135 #define DBGPX(s) do { } while(0)
136
137 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
138 static void __iomem *remap_pci_mem(ulong base, ulong size);
139 static int cpqarray_eisa_detect(void);
140 static int pollcomplete(int ctlr);
141 static void getgeometry(int ctlr);
142 static void start_fwbk(int ctlr);
143
144 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
145 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
146
147 static void free_hba(int i);
148 static int alloc_cpqarray_hba(void);
149
150 static int sendcmd(
151 __u8 cmd,
152 int ctlr,
153 void *buff,
154 size_t size,
155 unsigned int blk,
156 unsigned int blkcnt,
157 unsigned int log_unit );
158
159 static int ida_open(struct inode *inode, struct file *filep);
160 static int ida_release(struct inode *inode, struct file *filep);
161 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
162 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
163 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
164
165 static void do_ida_request(request_queue_t *q);
166 static void start_io(ctlr_info_t *h);
167
168 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
169 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
170 static inline void complete_buffers(struct bio *bio, int ok);
171 static inline void complete_command(cmdlist_t *cmd, int timeout);
172
173 static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
174 static void ida_timer(unsigned long tdata);
175 static int ida_revalidate(struct gendisk *disk);
176 static int revalidate_allvol(ctlr_info_t *host);
177 static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
178
179 #ifdef CONFIG_PROC_FS
180 static void ida_procinit(int i);
181 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
182 #else
183 static void ida_procinit(int i) {}
184 #endif
185
186 static inline drv_info_t *get_drv(struct gendisk *disk)
187 {
188 return disk->private_data;
189 }
190
191 static inline ctlr_info_t *get_host(struct gendisk *disk)
192 {
193 return disk->queue->queuedata;
194 }
195
196
197 static struct block_device_operations ida_fops = {
198 .owner = THIS_MODULE,
199 .open = ida_open,
200 .release = ida_release,
201 .ioctl = ida_ioctl,
202 .getgeo = ida_getgeo,
203 .revalidate_disk= ida_revalidate,
204 };
205
206
207 #ifdef CONFIG_PROC_FS
208
209 static struct proc_dir_entry *proc_array;
210
211 /*
212  * Get us a file in /proc/driver/cpqarray that says something about each
213  * controller.  Create the cpqarray directory if it doesn't exist yet.
214 */
215 static void __init ida_procinit(int i)
216 {
217 if (proc_array == NULL) {
218 proc_array = proc_mkdir("cpqarray", proc_root_driver);
219 if (!proc_array) return;
220 }
221
222 create_proc_read_entry(hba[i]->devname, 0, proc_array,
223 ida_proc_get_info, hba[i]);
224 }
225
226 /*
227 * Report information about this controller.
228 */
229 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
230 {
231 off_t pos = 0;
232 off_t len = 0;
233 int size, i, ctlr;
234 ctlr_info_t *h = (ctlr_info_t*)data;
235 drv_info_t *drv;
236 #ifdef CPQ_PROC_PRINT_QUEUES
237 cmdlist_t *c;
238 unsigned long flags;
239 #endif
240
241 ctlr = h->ctlr;
242 size = sprintf(buffer, "%s: Compaq %s Controller\n"
243 " Board ID: 0x%08lx\n"
244 " Firmware Revision: %c%c%c%c\n"
245 " Controller Sig: 0x%08lx\n"
246 " Memory Address: 0x%08lx\n"
247 " I/O Port: 0x%04x\n"
248 " IRQ: %d\n"
249 " Logical drives: %d\n"
250 " Physical drives: %d\n\n"
251 " Current Q depth: %d\n"
252 " Max Q depth since init: %d\n\n",
253 h->devname,
254 h->product_name,
255 (unsigned long)h->board_id,
256 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
257 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
258 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
259 h->log_drives, h->phys_drives,
260 h->Qdepth, h->maxQsinceinit);
261
262 pos += size; len += size;
263
264 size = sprintf(buffer+len, "Logical Drive Info:\n");
265 pos += size; len += size;
266
267 for(i=0; i<h->log_drives; i++) {
268 drv = &h->drv[i];
269 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
270 ctlr, i, drv->blk_size, drv->nr_blks);
271 pos += size; len += size;
272 }
273
274 #ifdef CPQ_PROC_PRINT_QUEUES
275 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
276 size = sprintf(buffer+len, "\nCurrent Queues:\n");
277 pos += size; len += size;
278
279 c = h->reqQ;
280 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
281 if (c) c=c->next;
282 while(c && c != h->reqQ) {
283 size = sprintf(buffer+len, "->%p", c);
284 pos += size; len += size;
285 c=c->next;
286 }
287
288 c = h->cmpQ;
289 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
290 if (c) c=c->next;
291 while(c && c != h->cmpQ) {
292 size = sprintf(buffer+len, "->%p", c);
293 pos += size; len += size;
294 c=c->next;
295 }
296
297 size = sprintf(buffer+len, "\n"); pos += size; len += size;
298 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
299 #endif
300 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
301 h->nr_allocs, h->nr_frees);
302 pos += size; len += size;
303
304 *eof = 1;
305 *start = buffer+offset;
306 len -= offset;
307 if (len>length)
308 len = length;
309 return len;
310 }
311 #endif /* CONFIG_PROC_FS */
312
313 module_param_array(eisa, int, NULL, 0);
314
315 static void release_io_mem(ctlr_info_t *c)
316 {
317 	/* if the I/O region was never reserved, do nothing */
318 if( c->io_mem_addr == 0)
319 return;
320 release_region(c->io_mem_addr, c->io_mem_length);
321 c->io_mem_addr = 0;
322 c->io_mem_length = 0;
323 }
324
325 static void __devexit cpqarray_remove_one(int i)
326 {
327 int j;
328 char buff[4];
329
330 	/* sendcmd will turn off interrupts and send the flush command
331 	 * to write all data in the battery-backed cache out to disk.
332 	 * No data is returned, but we don't want to pass NULL to sendcmd. */
333 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
334 {
335 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
336 i);
337 }
338 free_irq(hba[i]->intr, hba[i]);
339 iounmap(hba[i]->vaddr);
340 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
341 del_timer(&hba[i]->timer);
342 remove_proc_entry(hba[i]->devname, proc_array);
343 pci_free_consistent(hba[i]->pci_dev,
344 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
345 hba[i]->cmd_pool_dhandle);
346 kfree(hba[i]->cmd_pool_bits);
347 for(j = 0; j < NWD; j++) {
348 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
349 del_gendisk(ida_gendisk[i][j]);
350 put_disk(ida_gendisk[i][j]);
351 }
352 blk_cleanup_queue(hba[i]->queue);
353 release_io_mem(hba[i]);
354 free_hba(i);
355 }
356
357 static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
358 {
359 int i;
360 ctlr_info_t *tmp_ptr;
361
362 if (pci_get_drvdata(pdev) == NULL) {
363 printk( KERN_ERR "cpqarray: Unable to remove device \n");
364 return;
365 }
366
367 tmp_ptr = pci_get_drvdata(pdev);
368 i = tmp_ptr->ctlr;
369 if (hba[i] == NULL) {
370 		printk(KERN_ERR "cpqarray: controller %d appears to have "
371 			"already been removed\n", i);
372 return;
373 }
374 pci_set_drvdata(pdev, NULL);
375
376 cpqarray_remove_one(i);
377 }
378
379 /* removing an instance that was not removed automatically..
380 * must be an eisa card.
381 */
382 static void __devexit cpqarray_remove_one_eisa (int i)
383 {
384 if (hba[i] == NULL) {
385 		printk(KERN_ERR "cpqarray: controller %d appears to have "
386 			"already been removed\n", i);
387 return;
388 }
389 cpqarray_remove_one(i);
390 }
391
392 /* pdev is NULL for eisa */
393 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
394 {
395 request_queue_t *q;
396 int j;
397
398 /*
399 * register block devices
400 * Find disks and fill in structs
401 * Get an interrupt, set the Q depth and get into /proc
402 */
403
404 	/* If this is successful it should ensure that we are the only */
405 /* instance of the driver */
406 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
407 goto Enomem4;
408 }
409 hba[i]->access.set_intr_mask(hba[i], 0);
410 if (request_irq(hba[i]->intr, do_ida_intr,
411 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
412 {
413 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
414 hba[i]->intr, hba[i]->devname);
415 goto Enomem3;
416 }
417
418 for (j=0; j<NWD; j++) {
419 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
420 if (!ida_gendisk[i][j])
421 goto Enomem2;
422 }
423
424 hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
425 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
426 &(hba[i]->cmd_pool_dhandle));
427 hba[i]->cmd_pool_bits = kmalloc(
428 ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
429 GFP_KERNEL);
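	/*
	 * Sizing note (added commentary): with NR_CMDS = 128, the bitmap above
	 * needs (128 + BITS_PER_LONG - 1) / BITS_PER_LONG longs, i.e. two
	 * unsigned longs on a 64-bit build or four on a 32-bit build, 16 bytes
	 * either way -- one bit per preallocated command in cmd_pool.
	 */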
430
431 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
432 goto Enomem1;
433
434 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
435 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
436 printk(KERN_INFO "cpqarray: Finding drives on %s",
437 hba[i]->devname);
438
439 spin_lock_init(&hba[i]->lock);
440 q = blk_init_queue(do_ida_request, &hba[i]->lock);
441 if (!q)
442 goto Enomem1;
443
444 hba[i]->queue = q;
445 q->queuedata = hba[i];
446
447 getgeometry(i);
448 start_fwbk(i);
449
450 ida_procinit(i);
451
452 if (pdev)
453 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
454
455 /* This is a hardware imposed limit. */
456 blk_queue_max_hw_segments(q, SG_MAX);
457
458 /* This is a driver limit and could be eliminated. */
459 blk_queue_max_phys_segments(q, SG_MAX);
460
461 init_timer(&hba[i]->timer);
462 hba[i]->timer.expires = jiffies + IDA_TIMER;
463 hba[i]->timer.data = (unsigned long)hba[i];
464 hba[i]->timer.function = ida_timer;
465 add_timer(&hba[i]->timer);
466
467 /* Enable IRQ now that spinlock and rate limit timer are set up */
468 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
469
470 for(j=0; j<NWD; j++) {
471 struct gendisk *disk = ida_gendisk[i][j];
472 drv_info_t *drv = &hba[i]->drv[j];
473 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
474 disk->major = COMPAQ_SMART2_MAJOR + i;
475 disk->first_minor = j<<NWD_SHIFT;
476 disk->fops = &ida_fops;
477 if (j && !drv->nr_blks)
478 continue;
479 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
480 set_capacity(disk, drv->nr_blks);
481 disk->queue = hba[i]->queue;
482 disk->private_data = drv;
483 add_disk(disk);
484 }
485
486 /* done ! */
487 return(i);
488
489 Enomem1:
490 nr_ctlr = i;
491 kfree(hba[i]->cmd_pool_bits);
492 if (hba[i]->cmd_pool)
493 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
494 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
495 Enomem2:
496 while (j--) {
497 put_disk(ida_gendisk[i][j]);
498 ida_gendisk[i][j] = NULL;
499 }
500 free_irq(hba[i]->intr, hba[i]);
501 Enomem3:
502 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
503 Enomem4:
504 if (pdev)
505 pci_set_drvdata(pdev, NULL);
506 release_io_mem(hba[i]);
507 free_hba(i);
508
509 printk( KERN_ERR "cpqarray: out of memory");
510
511 return -1;
512 }
513
514 static int __init cpqarray_init_one( struct pci_dev *pdev,
515 const struct pci_device_id *ent)
516 {
517 int i;
518
519 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
520 " bus %d dev %d func %d\n",
521 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
522 PCI_FUNC(pdev->devfn));
523 i = alloc_cpqarray_hba();
524 if( i < 0 )
525 return (-1);
526 memset(hba[i], 0, sizeof(ctlr_info_t));
527 sprintf(hba[i]->devname, "ida%d", i);
528 hba[i]->ctlr = i;
529 /* Initialize the pdev driver private data */
530 pci_set_drvdata(pdev, hba[i]);
531
532 if (cpqarray_pci_init(hba[i], pdev) != 0) {
533 pci_set_drvdata(pdev, NULL);
534 release_io_mem(hba[i]);
535 free_hba(i);
536 return -1;
537 }
538
539 return (cpqarray_register_ctlr(i, pdev));
540 }
541
542 static struct pci_driver cpqarray_pci_driver = {
543 .name = "cpqarray",
544 .probe = cpqarray_init_one,
545 .remove = __devexit_p(cpqarray_remove_one_pci),
546 .id_table = cpqarray_pci_device_id,
547 };
548
549 /*
550 * This is it. Find all the controllers and register them.
551  * Returns the number of controllers successfully registered.
552 */
553 static int __init cpqarray_init(void)
554 {
555 int num_cntlrs_reg = 0;
556 int i;
557 int rc = 0;
558
559 /* detect controllers */
560 printk(DRIVER_NAME "\n");
561
562 rc = pci_register_driver(&cpqarray_pci_driver);
563 if (rc)
564 return rc;
565 cpqarray_eisa_detect();
566
567 for (i=0; i < MAX_CTLR; i++) {
568 if (hba[i] != NULL)
569 num_cntlrs_reg++;
570 }
571
572 return(num_cntlrs_reg);
573 }
574
575 /* Function to find the first free pointer into our hba[] array */
576 /* Returns -1 if no free entries are left. */
577 static int alloc_cpqarray_hba(void)
578 {
579 int i;
580
581 for(i=0; i< MAX_CTLR; i++) {
582 if (hba[i] == NULL) {
583 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
584 if(hba[i]==NULL) {
585 printk(KERN_ERR "cpqarray: out of memory.\n");
586 return (-1);
587 }
588 return (i);
589 }
590 }
591 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
592 " of 8 controllers.\n");
593 return(-1);
594 }
595
596 static void free_hba(int i)
597 {
598 kfree(hba[i]);
599 hba[i]=NULL;
600 }
601
602 /*
603 * Find the IO address of the controller, its IRQ and so forth. Fill
604 * in some basic stuff into the ctlr_info_t structure.
605 */
606 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
607 {
608 ushort vendor_id, device_id, command;
609 unchar cache_line_size, latency_timer;
610 unchar irq, revision;
611 unsigned long addr[6];
612 __u32 board_id;
613
614 int i;
615
616 c->pci_dev = pdev;
617 if (pci_enable_device(pdev)) {
618 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
619 return -1;
620 }
621 vendor_id = pdev->vendor;
622 device_id = pdev->device;
623 irq = pdev->irq;
624
625 for(i=0; i<6; i++)
626 addr[i] = pci_resource_start(pdev, i);
627
628 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
629 {
630 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
631 return -1;
632 }
633
634 pci_read_config_word(pdev, PCI_COMMAND, &command);
635 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
636 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
637 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
638
639 pci_read_config_dword(pdev, 0x2c, &board_id);
640
641 /* check to see if controller has been disabled */
642 if(!(command & 0x02)) {
643 printk(KERN_WARNING
644 "cpqarray: controller appears to be disabled\n");
645 return(-1);
646 }
647
648 DBGINFO(
649 printk("vendor_id = %x\n", vendor_id);
650 printk("device_id = %x\n", device_id);
651 printk("command = %x\n", command);
652 for(i=0; i<6; i++)
653 printk("addr[%d] = %lx\n", i, addr[i]);
654 printk("revision = %x\n", revision);
655 printk("irq = %x\n", irq);
656 printk("cache_line_size = %x\n", cache_line_size);
657 printk("latency_timer = %x\n", latency_timer);
658 printk("board_id = %x\n", board_id);
659 );
660
661 c->intr = irq;
662
663 for(i=0; i<6; i++) {
664 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
665 { /* IO space */
666 c->io_mem_addr = addr[i];
667 c->io_mem_length = pci_resource_end(pdev, i)
668 - pci_resource_start(pdev, i) + 1;
669 if(!request_region( c->io_mem_addr, c->io_mem_length,
670 "cpqarray"))
671 {
672 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
673 c->io_mem_addr = 0;
674 c->io_mem_length = 0;
675 }
676 break;
677 }
678 }
679
680 c->paddr = 0;
681 for(i=0; i<6; i++)
682 if (!(pci_resource_flags(pdev, i) &
683 PCI_BASE_ADDRESS_SPACE_IO)) {
684 c->paddr = pci_resource_start (pdev, i);
685 break;
686 }
687 if (!c->paddr)
688 return -1;
689 c->vaddr = remap_pci_mem(c->paddr, 128);
690 if (!c->vaddr)
691 return -1;
692 c->board_id = board_id;
693
694 for(i=0; i<NR_PRODUCTS; i++) {
695 if (board_id == products[i].board_id) {
696 c->product_name = products[i].product_name;
697 c->access = *(products[i].access);
698 break;
699 }
700 }
701 if (i == NR_PRODUCTS) {
702 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
703 " to access the SMART Array controller %08lx\n",
704 (unsigned long)board_id);
705 return -1;
706 }
707
708 return 0;
709 }
710
711 /*
712 * Map (physical) PCI mem into (virtual) kernel space
713 */
714 static void __iomem *remap_pci_mem(ulong base, ulong size)
715 {
716 ulong page_base = ((ulong) base) & PAGE_MASK;
717 ulong page_offs = ((ulong) base) - page_base;
718 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
719
720 return (page_remapped ? (page_remapped + page_offs) : NULL);
721 }
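/*
 * Worked example for the mapping above (added commentary; the address is
 * made up): with a 4K PAGE_SIZE, base 0xf7ef0c80 gives page_base 0xf7ef0000
 * and page_offs 0xc80, so we ioremap() from the page boundary and return the
 * remapped pointer plus the offset.  cpqarray_pci_init() only asks for 128
 * bytes, enough for the controller's register window.
 */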
722
723 #ifndef MODULE
724 /*
725 * Config string is a comma separated set of i/o addresses of EISA cards.
726 */
727 static int cpqarray_setup(char *str)
728 {
729 int i, ints[9];
730
731 (void)get_options(str, ARRAY_SIZE(ints), ints);
732
733 for(i=0; i<ints[0] && i<8; i++)
734 eisa[i] = ints[i+1];
735 return 1;
736 }
737
738 __setup("smart2=", cpqarray_setup);
739
740 #endif
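/*
 * Usage sketch (added commentary; the addresses are only examples): booting
 * with something like "smart2=0x4000,0x5000" fills eisa[0] and eisa[1] with
 * those I/O base addresses, and cpqarray_eisa_detect() below then probes
 * each base for a controller signature by reading the board id at
 * base + 0xC80.
 */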
741
742 /*
743 * Find an EISA controller's signature. Set up an hba if we find it.
744 */
745 static int __init cpqarray_eisa_detect(void)
746 {
747 int i=0, j;
748 __u32 board_id;
749 int intr;
750 int ctlr;
751 int num_ctlr = 0;
752
753 while(i<8 && eisa[i]) {
754 ctlr = alloc_cpqarray_hba();
755 if(ctlr == -1)
756 break;
757 board_id = inl(eisa[i]+0xC80);
758 for(j=0; j < NR_PRODUCTS; j++)
759 if (board_id == products[j].board_id)
760 break;
761
762 if (j == NR_PRODUCTS) {
763 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
764 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
765 continue;
766 }
767
768 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
769 hba[ctlr]->io_mem_addr = eisa[i];
770 hba[ctlr]->io_mem_length = 0x7FF;
771 if(!request_region(hba[ctlr]->io_mem_addr,
772 hba[ctlr]->io_mem_length,
773 "cpqarray"))
774 {
775 printk(KERN_WARNING "cpqarray: I/O range already in "
776 "use addr = %lx length = %ld\n",
777 hba[ctlr]->io_mem_addr,
778 hba[ctlr]->io_mem_length);
779 free_hba(ctlr);
780 continue;
781 }
782
783 /*
784 * Read the config register to find our interrupt
785 */
786 intr = inb(eisa[i]+0xCC0) >> 4;
787 if (intr & 1) intr = 11;
788 else if (intr & 2) intr = 10;
789 else if (intr & 4) intr = 14;
790 else if (intr & 8) intr = 15;
791
792 hba[ctlr]->intr = intr;
793 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
794 hba[ctlr]->product_name = products[j].product_name;
795 hba[ctlr]->access = *(products[j].access);
796 hba[ctlr]->ctlr = ctlr;
797 hba[ctlr]->board_id = board_id;
798 hba[ctlr]->pci_dev = NULL; /* not PCI */
799
800 DBGINFO(
801 printk("i = %d, j = %d\n", i, j);
802 printk("irq = %x\n", intr);
803 printk("product name = %s\n", products[j].product_name);
804 printk("board_id = %x\n", board_id);
805 );
806
807 num_ctlr++;
808 i++;
809
810 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
811 printk(KERN_WARNING
812 "cpqarray: Can't register EISA controller %d\n",
813 ctlr);
814
815 }
816
817 return num_ctlr;
818 }
819
820 /*
821 * Open. Make sure the device is really there.
822 */
823 static int ida_open(struct inode *inode, struct file *filep)
824 {
825 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
826 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
827
828 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
829 /*
830 * Root is allowed to open raw volume zero even if it's not configured
831 * so array config can still work. I don't think I really like this,
832  * but I'm already using way too many device nodes to claim another one
833 * for "raw controller".
834 */
835 if (!drv->nr_blks) {
836 if (!capable(CAP_SYS_RAWIO))
837 return -ENXIO;
838 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
839 return -ENXIO;
840 }
841 host->usage_count++;
842 return 0;
843 }
844
845 /*
846 * Close. Sync first.
847 */
848 static int ida_release(struct inode *inode, struct file *filep)
849 {
850 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
851 host->usage_count--;
852 return 0;
853 }
854
855 /*
856 * Enqueuing and dequeuing functions for cmdlists.
857 */
858 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
859 {
860 if (*Qptr == NULL) {
861 *Qptr = c;
862 c->next = c->prev = c;
863 } else {
864 c->prev = (*Qptr)->prev;
865 c->next = (*Qptr);
866 (*Qptr)->prev->next = c;
867 (*Qptr)->prev = c;
868 }
869 }
870
871 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
872 {
873 if (c && c->next != c) {
874 if (*Qptr == c) *Qptr = c->next;
875 c->prev->next = c->next;
876 c->next->prev = c->prev;
877 } else {
878 *Qptr = NULL;
879 }
880 return c;
881 }
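/*
 * Added commentary on the two helpers above: reqQ and cmpQ are circular
 * doubly-linked lists threaded through cmdlist_t.next/.prev, with the queue
 * pointer naming an arbitrary "head" element.  A rough sketch of how the
 * driver uses them (see do_ida_request() and start_io() below):
 *
 *	addQ(&h->reqQ, c);		// queue a freshly built command
 *	...
 *	c = h->reqQ;
 *	removeQ(&h->reqQ, c);		// pull it back off for submission
 *	h->access.submit_command(h, c);
 *	addQ(&h->cmpQ, c);		// park it until the completion irq
 */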
882
883 /*
884 * Get a request and submit it to the controller.
885 * This routine needs to grab all the requests it possibly can from the
886 * req Q and submit them. Interrupts are off (and need to be off) when you
887 * are in here (either via the dummy do_ida_request functions or by being
888  * called from the interrupt handler).
889 */
890 static void do_ida_request(request_queue_t *q)
891 {
892 ctlr_info_t *h = q->queuedata;
893 cmdlist_t *c;
894 struct request *creq;
895 struct scatterlist tmp_sg[SG_MAX];
896 int i, dir, seg;
897
898 if (blk_queue_plugged(q))
899 goto startio;
900
901 queue_next:
902 creq = elv_next_request(q);
903 if (!creq)
904 goto startio;
905
906 BUG_ON(creq->nr_phys_segments > SG_MAX);
907
908 if ((c = cmd_alloc(h,1)) == NULL)
909 goto startio;
910
911 blkdev_dequeue_request(creq);
912
913 c->ctlr = h->ctlr;
914 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
915 c->hdr.size = sizeof(rblk_t) >> 2;
916 c->size += sizeof(rblk_t);
917
918 c->req.hdr.blk = creq->sector;
919 c->rq = creq;
920 DBGPX(
921 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
922 );
923 seg = blk_rq_map_sg(q, creq, tmp_sg);
924
925 /* Now do all the DMA Mappings */
926 if (rq_data_dir(creq) == READ)
927 dir = PCI_DMA_FROMDEVICE;
928 else
929 dir = PCI_DMA_TODEVICE;
930 for( i=0; i < seg; i++)
931 {
932 c->req.sg[i].size = tmp_sg[i].length;
933 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
934 tmp_sg[i].page,
935 tmp_sg[i].offset,
936 tmp_sg[i].length, dir);
937 }
938 DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
939 c->req.hdr.sg_cnt = seg;
940 c->req.hdr.blk_cnt = creq->nr_sectors;
941 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
942 c->type = CMD_RWREQ;
943
944 /* Put the request on the tail of the request queue */
945 addQ(&h->reqQ, c);
946 h->Qdepth++;
947 if (h->Qdepth > h->maxQsinceinit)
948 h->maxQsinceinit = h->Qdepth;
949
950 goto queue_next;
951
952 startio:
953 start_io(h);
954 }
955
956 /*
957 * start_io submits everything on a controller's request queue
958 * and moves it to the completion queue.
959 *
960 * Interrupts had better be off if you're in here
961 */
962 static void start_io(ctlr_info_t *h)
963 {
964 cmdlist_t *c;
965
966 while((c = h->reqQ) != NULL) {
967 /* Can't do anything if we're busy */
968 if (h->access.fifo_full(h) == 0)
969 return;
970
971 /* Get the first entry from the request Q */
972 removeQ(&h->reqQ, c);
973 h->Qdepth--;
974
975 /* Tell the controller to do our bidding */
976 h->access.submit_command(h, c);
977
978 /* Get onto the completion Q */
979 addQ(&h->cmpQ, c);
980 }
981 }
982
983 static inline void complete_buffers(struct bio *bio, int ok)
984 {
985 struct bio *xbh;
986 while(bio) {
987 int nr_sectors = bio_sectors(bio);
988
989 xbh = bio->bi_next;
990 bio->bi_next = NULL;
991
992 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
993
994 bio = xbh;
995 }
996 }
997 /*
998 * Mark all buffers that cmd was responsible for
999  * Mark all the buffers that cmd was responsible for as completed
1000 static inline void complete_command(cmdlist_t *cmd, int timeout)
1001 {
1002 int ok=1;
1003 int i, ddir;
1004
1005 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1006 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1007 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1008 cmd->ctlr, cmd->hdr.unit);
1009 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1010 }
1011 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1012 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1013 cmd->ctlr, cmd->hdr.unit);
1014 ok = 0;
1015 }
1016 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1017 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1018 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1019 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1020 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1021 ok = 0;
1022 }
1023 if (timeout) ok = 0;
1024 /* unmap the DMA mapping for all the scatter gather elements */
1025 if (cmd->req.hdr.cmd == IDA_READ)
1026 ddir = PCI_DMA_FROMDEVICE;
1027 else
1028 ddir = PCI_DMA_TODEVICE;
1029 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1030 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1031 cmd->req.sg[i].size, ddir);
1032
1033 complete_buffers(cmd->rq->bio, ok);
1034
1035 add_disk_randomness(cmd->rq->rq_disk);
1036
1037 DBGPX(printk("Done with %p\n", cmd->rq););
1038 end_that_request_last(cmd->rq, ok ? 1 : -EIO);
1039 }
1040
1041 /*
1042 * The controller will interrupt us upon completion of commands.
1043 * Find the command on the completion queue, remove it, tell the OS and
1044 * try to queue up more IO
1045 */
1046 static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1047 {
1048 ctlr_info_t *h = dev_id;
1049 cmdlist_t *c;
1050 unsigned long istat;
1051 unsigned long flags;
1052 __u32 a,a1;
1053
1054 istat = h->access.intr_pending(h);
1055 /* Is this interrupt for us? */
1056 if (istat == 0)
1057 return IRQ_NONE;
1058
1059 /*
1060 * If there are completed commands in the completion queue,
1061 * we had better do something about it.
1062 */
1063 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1064 if (istat & FIFO_NOT_EMPTY) {
1065 while((a = h->access.command_completed(h))) {
1066 a1 = a; a &= ~3;
1067 if ((c = h->cmpQ) == NULL)
1068 {
1069 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1070 continue;
1071 }
1072 while(c->busaddr != a) {
1073 c = c->next;
1074 if (c == h->cmpQ)
1075 break;
1076 }
1077 /*
1078 * If we've found the command, take it off the
1079 * completion Q and free it
1080 */
1081 if (c->busaddr == a) {
1082 removeQ(&h->cmpQ, c);
1083 				/* Check for an invalid command:
1084 				 * the controller reported a command error,
1085 				 * but rcode is still 0.
1086 				 */
1087
1088 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1089 {
1090 c->req.hdr.rcode = RCODE_INVREQ;
1091 }
1092 if (c->type == CMD_RWREQ) {
1093 complete_command(c, 0);
1094 cmd_free(h, c, 1);
1095 } else if (c->type == CMD_IOCTL_PEND) {
1096 c->type = CMD_IOCTL_DONE;
1097 }
1098 continue;
1099 }
1100 }
1101 }
1102
1103 /*
1104 * See if we can queue up some more IO
1105 */
1106 do_ida_request(h->queue);
1107 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1108 return IRQ_HANDLED;
1109 }
1110
1111 /*
1112 * This timer was for timing out requests that haven't happened after
1113 * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
1114 * reset a flags structure so we don't flood the user with
1115 * "Non-Fatal error" messages.
1116 */
1117 static void ida_timer(unsigned long tdata)
1118 {
1119 ctlr_info_t *h = (ctlr_info_t*)tdata;
1120
1121 h->timer.expires = jiffies + IDA_TIMER;
1122 add_timer(&h->timer);
1123 h->misc_tflags = 0;
1124 }
1125
1126 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1127 {
1128 drv_info_t *drv = get_drv(bdev->bd_disk);
1129
1130 if (drv->cylinders) {
1131 geo->heads = drv->heads;
1132 geo->sectors = drv->sectors;
1133 geo->cylinders = drv->cylinders;
1134 } else {
1135 geo->heads = 0xff;
1136 geo->sectors = 0x3f;
1137 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1138 }
1139
1140 return 0;
1141 }
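/*
 * Added commentary on the fallback branch above: when the controller did not
 * report a geometry we fake one with 255 heads and 63 sectors per track, so
 * cylinders = nr_blks / (255 * 63).  For example (numbers made up), a
 * 16,450,560-block volume reports 1024 cylinders, since
 * 16450560 / 16065 = 1024.
 */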
1142
1143 /*
1144  * ida_ioctl does some miscellaneous stuff like reporting drive info,
1145  * revalidating volumes and submitting commands from userspace to the controller.
1146 */
1147 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1148 {
1149 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1150 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1151 int error;
1152 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1153 ida_ioctl_t *my_io;
1154
1155 switch(cmd) {
1156 case IDAGETDRVINFO:
1157 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1158 return -EFAULT;
1159 return 0;
1160 case IDAPASSTHRU:
1161 if (!capable(CAP_SYS_RAWIO))
1162 return -EPERM;
1163 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1164 if (!my_io)
1165 return -ENOMEM;
1166 error = -EFAULT;
1167 if (copy_from_user(my_io, io, sizeof(*my_io)))
1168 goto out_passthru;
1169 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1170 if (error)
1171 goto out_passthru;
1172 error = -EFAULT;
1173 if (copy_to_user(io, my_io, sizeof(*my_io)))
1174 goto out_passthru;
1175 error = 0;
1176 out_passthru:
1177 kfree(my_io);
1178 return error;
1179 case IDAGETCTLRSIG:
1180 if (!arg) return -EINVAL;
1181 put_user(host->ctlr_sig, (int __user *)arg);
1182 return 0;
1183 case IDAREVALIDATEVOLS:
1184 if (iminor(inode) != 0)
1185 return -ENXIO;
1186 return revalidate_allvol(host);
1187 case IDADRIVERVERSION:
1188 if (!arg) return -EINVAL;
1189 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1190 return 0;
1191 case IDAGETPCIINFO:
1192 {
1193
1194 ida_pci_info_struct pciinfo;
1195
1196 if (!arg) return -EINVAL;
1197 pciinfo.bus = host->pci_dev->bus->number;
1198 pciinfo.dev_fn = host->pci_dev->devfn;
1199 pciinfo.board_id = host->board_id;
1200 if(copy_to_user((void __user *) arg, &pciinfo,
1201 sizeof( ida_pci_info_struct)))
1202 return -EFAULT;
1203 return(0);
1204 }
1205
1206 default:
1207 return -EINVAL;
1208 }
1209
1210 }
1211 /*
1212 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1213 * The command block (io) has already been copied to kernel space for us,
1214 * however, any elements in the sglist need to be copied to kernel space
1215 * or copied back to userspace.
1216 *
1217  * Only root may perform a controller passthru command; however, I'm not doing
1218 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1219 * putting a 64M buffer in the sglist is probably a *bad* idea.
1220 */
1221 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1222 {
1223 int ctlr = h->ctlr;
1224 cmdlist_t *c;
1225 void *p = NULL;
1226 unsigned long flags;
1227 int error;
1228
1229 if ((c = cmd_alloc(h, 0)) == NULL)
1230 return -ENOMEM;
1231 c->ctlr = ctlr;
1232 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1233 c->hdr.size = sizeof(rblk_t) >> 2;
1234 c->size += sizeof(rblk_t);
1235
1236 c->req.hdr.cmd = io->cmd;
1237 c->req.hdr.blk = io->blk;
1238 c->req.hdr.blk_cnt = io->blk_cnt;
1239 c->type = CMD_IOCTL_PEND;
1240
1241 /* Pre submit processing */
1242 switch(io->cmd) {
1243 case PASSTHRU_A:
1244 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1245 if (!p)
1246 {
1247 error = -ENOMEM;
1248 cmd_free(h, c, 0);
1249 return(error);
1250 }
1251 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1252 kfree(p);
1253 cmd_free(h, c, 0);
1254 return -EFAULT;
1255 }
1256 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1257 sizeof(ida_ioctl_t),
1258 PCI_DMA_BIDIRECTIONAL);
1259 c->req.sg[0].size = io->sg[0].size;
1260 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1261 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1262 c->req.hdr.sg_cnt = 1;
1263 break;
1264 case IDA_READ:
1265 case READ_FLASH_ROM:
1266 case SENSE_CONTROLLER_PERFORMANCE:
1267 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1268 if (!p)
1269 {
1270 error = -ENOMEM;
1271 cmd_free(h, c, 0);
1272 return(error);
1273 }
1274
1275 c->req.sg[0].size = io->sg[0].size;
1276 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1277 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1278 c->req.hdr.sg_cnt = 1;
1279 break;
1280 case IDA_WRITE:
1281 case IDA_WRITE_MEDIA:
1282 case DIAG_PASS_THRU:
1283 case COLLECT_BUFFER:
1284 case WRITE_FLASH_ROM:
1285 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1286 if (!p)
1287 {
1288 error = -ENOMEM;
1289 cmd_free(h, c, 0);
1290 return(error);
1291 }
1292 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1293 kfree(p);
1294 cmd_free(h, c, 0);
1295 return -EFAULT;
1296 }
1297 c->req.sg[0].size = io->sg[0].size;
1298 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1299 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1300 c->req.hdr.sg_cnt = 1;
1301 break;
1302 default:
1303 c->req.sg[0].size = sizeof(io->c);
1304 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1305 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1306 c->req.hdr.sg_cnt = 1;
1307 }
1308
1309 /* Put the request on the tail of the request queue */
1310 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1311 addQ(&h->reqQ, c);
1312 h->Qdepth++;
1313 start_io(h);
1314 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1315
1316 /* Wait for completion */
1317 while(c->type != CMD_IOCTL_DONE)
1318 schedule();
1319
1320 /* Unmap the DMA */
1321 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1322 PCI_DMA_BIDIRECTIONAL);
1323 /* Post submit processing */
1324 switch(io->cmd) {
1325 case PASSTHRU_A:
1326 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1327 sizeof(ida_ioctl_t),
1328 PCI_DMA_BIDIRECTIONAL);
1329 case IDA_READ:
1330 case DIAG_PASS_THRU:
1331 case SENSE_CONTROLLER_PERFORMANCE:
1332 case READ_FLASH_ROM:
1333 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1334 kfree(p);
1335 return -EFAULT;
1336 }
1337 /* fall through and free p */
1338 case IDA_WRITE:
1339 case IDA_WRITE_MEDIA:
1340 case COLLECT_BUFFER:
1341 case WRITE_FLASH_ROM:
1342 kfree(p);
1343 break;
1344 default:;
1345 /* Nothing to do */
1346 }
1347
1348 io->rcode = c->req.hdr.rcode;
1349 cmd_free(h, c, 0);
1350 return(0);
1351 }
1352
1353 /*
1354 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1355  * scheme to suballocate them to the driver.  Operations that are not time
1356  * critical (and can wait for the allocation and possibly sleep) can pass 0
1357  * as get_from_pool to get a freshly allocated command instead.
1358 */
1359 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1360 {
1361 cmdlist_t * c;
1362 int i;
1363 dma_addr_t cmd_dhandle;
1364
1365 if (!get_from_pool) {
1366 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1367 sizeof(cmdlist_t), &cmd_dhandle);
1368 if(c==NULL)
1369 return NULL;
1370 } else {
1371 do {
1372 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1373 if (i == NR_CMDS)
1374 return NULL;
1375 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1376 c = h->cmd_pool + i;
1377 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1378 h->nr_allocs++;
1379 }
1380
1381 memset(c, 0, sizeof(cmdlist_t));
1382 c->busaddr = cmd_dhandle;
1383 return c;
1384 }
1385
1386 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1387 {
1388 int i;
1389
1390 if (!got_from_pool) {
1391 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1392 c->busaddr);
1393 } else {
1394 i = c - h->cmd_pool;
1395 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1396 h->nr_frees++;
1397 }
1398 }
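/*
 * Added commentary on the pool path above: bit i of cmd_pool_bits marks
 * command slot i as busy, so the i-th pooled command and its DMA handle are
 * simply
 *
 *	c          = h->cmd_pool + i;
 *	c->busaddr = h->cmd_pool_dhandle + i * sizeof(cmdlist_t);
 *
 * and cmd_free() just clears the bit again.  The do/while around
 * test_and_set_bit() retries the search if another path claimed the same
 * slot between the find and the set.
 */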
1399
1400 /***********************************************************************
1401 name: sendcmd
1402 Send a command to an IDA using the memory mapped FIFO interface
1403 and wait for it to complete.
1404 This routine should only be called at init time.
1405 ***********************************************************************/
1406 static int sendcmd(
1407 __u8 cmd,
1408 int ctlr,
1409 void *buff,
1410 size_t size,
1411 unsigned int blk,
1412 unsigned int blkcnt,
1413 unsigned int log_unit )
1414 {
1415 cmdlist_t *c;
1416 int complete;
1417 unsigned long temp;
1418 unsigned long i;
1419 ctlr_info_t *info_p = hba[ctlr];
1420
1421 c = cmd_alloc(info_p, 1);
1422 if(!c)
1423 return IO_ERROR;
1424 c->ctlr = ctlr;
1425 c->hdr.unit = log_unit;
1426 c->hdr.prio = 0;
1427 c->hdr.size = sizeof(rblk_t) >> 2;
1428 c->size += sizeof(rblk_t);
1429
1430 /* The request information. */
1431 c->req.hdr.next = 0;
1432 c->req.hdr.rcode = 0;
1433 c->req.bp = 0;
1434 c->req.hdr.sg_cnt = 1;
1435 c->req.hdr.reserved = 0;
1436
1437 if (size == 0)
1438 c->req.sg[0].size = 512;
1439 else
1440 c->req.sg[0].size = size;
1441
1442 c->req.hdr.blk = blk;
1443 c->req.hdr.blk_cnt = blkcnt;
1444 c->req.hdr.cmd = (unsigned char) cmd;
1445 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1446 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1447 /*
1448 * Disable interrupt
1449 */
1450 info_p->access.set_intr_mask(info_p, 0);
1451 /* Make sure there is room in the command FIFO */
1452 /* Actually it should be completely empty at this time. */
1453 for (i = 200000; i > 0; i--) {
1454 temp = info_p->access.fifo_full(info_p);
1455 if (temp != 0) {
1456 break;
1457 }
1458 udelay(10);
1459 DBG(
1460 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1461 " waiting!\n", ctlr);
1462 );
1463 }
1464 /*
1465 * Send the cmd
1466 */
1467 info_p->access.submit_command(info_p, c);
1468 complete = pollcomplete(ctlr);
1469
1470 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1471 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1472 if (complete != 1) {
1473 if (complete != c->busaddr) {
1474 printk( KERN_WARNING
1475 "cpqarray ida%d: idaSendPciCmd "
1476 "Invalid command list address returned! (%08lx)\n",
1477 ctlr, (unsigned long)complete);
1478 cmd_free(info_p, c, 1);
1479 return (IO_ERROR);
1480 }
1481 } else {
1482 printk( KERN_WARNING
1483 			"cpqarray ida%d: idaSendPciCmd timed out, "
1484 "No command list address returned!\n",
1485 ctlr);
1486 cmd_free(info_p, c, 1);
1487 return (IO_ERROR);
1488 }
1489
1490 if (c->req.hdr.rcode & 0x00FE) {
1491 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1492 printk( KERN_WARNING
1493 "cpqarray ida%d: idaSendPciCmd, error: "
1494 "Controller failed at init time "
1495 "cmd: 0x%x, return code = 0x%x\n",
1496 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1497
1498 cmd_free(info_p, c, 1);
1499 return (IO_ERROR);
1500 }
1501 }
1502 cmd_free(info_p, c, 1);
1503 return (IO_OK);
1504 }
1505
1506 /*
1507 * revalidate_allvol is for online array config utilities. After a
1508 * utility reconfigures the drives in the array, it can use this function
1509 * (through an ioctl) to make the driver zap any previous disk structs for
1510 * that controller and get new ones.
1511 *
1512 * Right now I'm using the getgeometry() function to do this, but this
1513 * function should probably be finer grained and allow you to revalidate one
1514  * particular logical volume (instead of all of them on a particular
1515 * controller).
1516 */
1517 static int revalidate_allvol(ctlr_info_t *host)
1518 {
1519 int ctlr = host->ctlr;
1520 int i;
1521 unsigned long flags;
1522
1523 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1524 if (host->usage_count > 1) {
1525 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1526 printk(KERN_WARNING "cpqarray: Device busy for volume"
1527 " revalidation (usage=%d)\n", host->usage_count);
1528 return -EBUSY;
1529 }
1530 host->usage_count++;
1531 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1532
1533 /*
1534 * Set the partition and block size structures for all volumes
1535 * on this controller to zero. We will reread all of this data
1536 */
1537 set_capacity(ida_gendisk[ctlr][0], 0);
1538 for (i = 1; i < NWD; i++) {
1539 struct gendisk *disk = ida_gendisk[ctlr][i];
1540 if (disk->flags & GENHD_FL_UP)
1541 del_gendisk(disk);
1542 }
1543 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1544
1545 /*
1546 * Tell the array controller not to give us any interrupts while
1547 * we check the new geometry. Then turn interrupts back on when
1548 * we're done.
1549 */
1550 host->access.set_intr_mask(host, 0);
1551 getgeometry(ctlr);
1552 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1553
1554 for(i=0; i<NWD; i++) {
1555 struct gendisk *disk = ida_gendisk[ctlr][i];
1556 drv_info_t *drv = &host->drv[i];
1557 if (i && !drv->nr_blks)
1558 continue;
1559 blk_queue_hardsect_size(host->queue, drv->blk_size);
1560 set_capacity(disk, drv->nr_blks);
1561 disk->queue = host->queue;
1562 disk->private_data = drv;
1563 if (i)
1564 add_disk(disk);
1565 }
1566
1567 host->usage_count--;
1568 return 0;
1569 }
1570
1571 static int ida_revalidate(struct gendisk *disk)
1572 {
1573 drv_info_t *drv = disk->private_data;
1574 set_capacity(disk, drv->nr_blks);
1575 return 0;
1576 }
1577
1578 /********************************************************************
1579 name: pollcomplete
1580 Wait polling for a command to complete.
1581 The memory mapped FIFO is polled for the completion.
1582 Used only at init time, interrupts disabled.
1583 ********************************************************************/
1584 static int pollcomplete(int ctlr)
1585 {
1586 int done;
1587 int i;
1588
1589 /* Wait (up to 2 seconds) for a command to complete */
1590
1591 for (i = 200000; i > 0; i--) {
1592 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1593 if (done == 0) {
1594 udelay(10); /* a short fixed delay */
1595 } else
1596 return (done);
1597 }
1598 /* Invalid address to tell caller we ran out of time */
1599 return 1;
1600 }
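/*
 * Timing note (added commentary): the loop above polls 200000 times with a
 * 10 microsecond delay per iteration, i.e. roughly 200000 * 10us = 2
 * seconds, which is where the "up to 2 seconds" in the comment comes from.
 */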
1601 /*****************************************************************
1602 start_fwbk
1603 Starts the controller firmware's background processing.
1604 Currently only the Integrated Raid controller needs this done.
1605 If the PCI mem address registers are written to after this,
1606 data corruption may occur
1607 *****************************************************************/
1608 static void start_fwbk(int ctlr)
1609 {
1610 id_ctlr_t *id_ctlr_buf;
1611 int ret_code;
1612
1613 if( (hba[ctlr]->board_id != 0x40400E11)
1614 && (hba[ctlr]->board_id != 0x40480E11) )
1615
1616 		/* Not an Integrated Raid controller, so there is nothing for us to do */
1617 return;
1618 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1619 " processing\n");
1620 	/* The command does not return any data, but sendcmd still needs a
1621 	   buffer */
1622 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1623 if(id_ctlr_buf==NULL)
1624 {
1625 printk(KERN_WARNING "cpqarray: Out of memory. "
1626 "Unable to start background processing.\n");
1627 return;
1628 }
1629 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1630 id_ctlr_buf, 0, 0, 0, 0);
1631 if(ret_code != IO_OK)
1632 printk(KERN_WARNING "cpqarray: Unable to start"
1633 " background processing\n");
1634
1635 kfree(id_ctlr_buf);
1636 }
1637 /*****************************************************************
1638 getgeometry
1639 Get ida logical volume geometry from the controller
1640 This is a large bit of code which once existed in two flavors.
1641 It is used only at init time.
1642 *****************************************************************/
1643 static void getgeometry(int ctlr)
1644 {
1645 id_log_drv_t *id_ldrive;
1646 id_ctlr_t *id_ctlr_buf;
1647 sense_log_drv_stat_t *id_lstatus_buf;
1648 config_t *sense_config_buf;
1649 unsigned int log_unit, log_index;
1650 int ret_code, size;
1651 drv_info_t *drv;
1652 ctlr_info_t *info_p = hba[ctlr];
1653 int i;
1654
1655 info_p->log_drv_map = 0;
1656
1657 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1658 if(id_ldrive == NULL)
1659 {
1660 printk( KERN_ERR "cpqarray: out of memory.\n");
1661 return;
1662 }
1663
1664 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1665 if(id_ctlr_buf == NULL)
1666 {
1667 kfree(id_ldrive);
1668 printk( KERN_ERR "cpqarray: out of memory.\n");
1669 return;
1670 }
1671
1672 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1673 if(id_lstatus_buf == NULL)
1674 {
1675 kfree(id_ctlr_buf);
1676 kfree(id_ldrive);
1677 printk( KERN_ERR "cpqarray: out of memory.\n");
1678 return;
1679 }
1680
1681 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
1682 if(sense_config_buf == NULL)
1683 {
1684 kfree(id_lstatus_buf);
1685 kfree(id_ctlr_buf);
1686 kfree(id_ldrive);
1687 printk( KERN_ERR "cpqarray: out of memory.\n");
1688 return;
1689 }
1690
1691 memset(id_ldrive, 0, sizeof(id_log_drv_t));
1692 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
1693 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
1694 memset(sense_config_buf, 0, sizeof(config_t));
1695
1696 info_p->phys_drives = 0;
1697 info_p->log_drv_map = 0;
1698 info_p->drv_assign_map = 0;
1699 info_p->drv_spare_map = 0;
1700 info_p->mp_failed_drv_map = 0; /* only initialized here */
1701 /* Get controllers info for this logical drive */
1702 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1703 if (ret_code == IO_ERROR) {
1704 /*
1705 		 * If we can't get the controller info, set the logical drive map to 0,
1706 		 * so that idastubopen will fail on all logical drives
1707 * on the controller.
1708 */
1709 /* Free all the buffers and return */
1710 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1711 kfree(sense_config_buf);
1712 kfree(id_lstatus_buf);
1713 kfree(id_ctlr_buf);
1714 kfree(id_ldrive);
1715 return;
1716 }
1717
1718 info_p->log_drives = id_ctlr_buf->nr_drvs;
1719 for(i=0;i<4;i++)
1720 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1721 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1722
1723 printk(" (%s)\n", info_p->product_name);
1724 /*
1725 	 * Initialize the logical drive index to zero
1726 */
1727 log_index = 0;
1728 /*
1729 * Get drive geometry for all logical drives
1730 */
1731 if (id_ctlr_buf->nr_drvs > 16)
1732 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1733 			"16 logical drives per controller.\n"
1734 			"Additional drives will not be "
1735 			"detected.\n", ctlr);
1736
1737 for (log_unit = 0;
1738 (log_index < id_ctlr_buf->nr_drvs)
1739 && (log_unit < NWD);
1740 log_unit++) {
1741 size = sizeof(sense_log_drv_stat_t);
1742
1743 /*
1744 Send "Identify logical drive status" cmd
1745 */
1746 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1747 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1748 if (ret_code == IO_ERROR) {
1749 /*
1750 		   If we can't get the logical drive status, set
1751 		   the logical drive map to 0, so that
1752 idastubopen will fail for all logical drives
1753 on the controller.
1754 */
1755 info_p->log_drv_map = 0;
1756 printk( KERN_WARNING
1757 "cpqarray ida%d: idaGetGeometry - Controller"
1758 " failed to report status of logical drive %d\n"
1759 "Access to this controller has been disabled\n",
1760 ctlr, log_unit);
1761 /* Free all the buffers and return */
1762 kfree(sense_config_buf);
1763 kfree(id_lstatus_buf);
1764 kfree(id_ctlr_buf);
1765 kfree(id_ldrive);
1766 return;
1767 }
1768 /*
1769 Make sure the logical drive is configured
1770 */
1771 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1772 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1773 sizeof(id_log_drv_t), 0, 0, log_unit);
1774 /*
1775 If error, the bit for this
1776 logical drive won't be set and
1777 idastubopen will return error.
1778 */
1779 if (ret_code != IO_ERROR) {
1780 drv = &info_p->drv[log_unit];
1781 drv->blk_size = id_ldrive->blk_size;
1782 drv->nr_blks = id_ldrive->nr_blks;
1783 drv->cylinders = id_ldrive->drv.cyl;
1784 drv->heads = id_ldrive->drv.heads;
1785 drv->sectors = id_ldrive->drv.sect_per_track;
1786 info_p->log_drv_map |= (1 << log_unit);
1787
1788 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1789 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1790 ret_code = sendcmd(SENSE_CONFIG,
1791 ctlr, sense_config_buf,
1792 sizeof(config_t), 0, 0, log_unit);
1793 if (ret_code == IO_ERROR) {
1794 info_p->log_drv_map = 0;
1795 /* Free all the buffers and return */
1796 printk(KERN_ERR "cpqarray: error sending sense config\n");
1797 kfree(sense_config_buf);
1798 kfree(id_lstatus_buf);
1799 kfree(id_ctlr_buf);
1800 kfree(id_ldrive);
1801 return;
1802
1803 }
1804
1805 info_p->phys_drives =
1806 sense_config_buf->ctlr_phys_drv;
1807 info_p->drv_assign_map
1808 |= sense_config_buf->drv_asgn_map;
1809 info_p->drv_assign_map
1810 |= sense_config_buf->spare_asgn_map;
1811 info_p->drv_spare_map
1812 |= sense_config_buf->spare_asgn_map;
1813 } /* end of if no error on id_ldrive */
1814 log_index = log_index + 1;
1815 } /* end of if logical drive configured */
1816 } /* end of for log_unit */
1817 kfree(sense_config_buf);
1818 kfree(id_ldrive);
1819 kfree(id_lstatus_buf);
1820 kfree(id_ctlr_buf);
1821 return;
1822
1823 }
1824
1825 static void __exit cpqarray_exit(void)
1826 {
1827 int i;
1828
1829 pci_unregister_driver(&cpqarray_pci_driver);
1830
1831 /* Double check that all controller entries have been removed */
1832 for(i=0; i<MAX_CTLR; i++) {
1833 if (hba[i] != NULL) {
1834 printk(KERN_WARNING "cpqarray: Removing EISA "
1835 "controller %d\n", i);
1836 cpqarray_remove_one_eisa(i);
1837 }
1838 }
1839
1840 remove_proc_entry("cpqarray", proc_root_driver);
1841 }
1842
1843 module_init(cpqarray_init)
1844 module_exit(cpqarray_exit)