drivers/block/cpqarray.c ([BLOCK] Get rid of request_queue_t typedef)
1 /*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/bio.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/blkpg.h>
33 #include <linux/timer.h>
34 #include <linux/proc_fs.h>
35 #include <linux/init.h>
36 #include <linux/hdreg.h>
37 #include <linux/spinlock.h>
38 #include <linux/blkdev.h>
39 #include <linux/genhd.h>
40 #include <asm/uaccess.h>
41 #include <asm/io.h>
42
43
44 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
45
46 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
47 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
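/*
 * SMART2_DRIVER_VERSION packs major/minor/subminor one byte each, so
 * SMART2_DRIVER_VERSION(2,6,0) == 0x00020600.  This is the value handed
 * back to userspace by the IDADRIVERVERSION ioctl below.
 */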
48
49 /* Embedded module documentation macros - see modules.h */
50 /* Original author Chris Frantz - Compaq Computer Corporation */
51 MODULE_AUTHOR("Compaq Computer Corporation");
52 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
53 MODULE_LICENSE("GPL");
54
55 #include "cpqarray.h"
56 #include "ida_cmd.h"
57 #include "smart1,2.h"
58 #include "ida_ioctl.h"
59
60 #define READ_AHEAD 128
61 #define NR_CMDS 128 /* This could probably go as high as ~400 */
62
63 #define MAX_CTLR 8
64 #define CTLR_SHIFT 8
65
66 #define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
67
68 static int nr_ctlr;
69 static ctlr_info_t *hba[MAX_CTLR];
70
71 static int eisa[8];
72
73 #define NR_PRODUCTS ARRAY_SIZE(products)
74
75 /* board_id = Subsystem Device ID & Vendor ID
76 * product = Marketing Name for the board
77 * access = Address of the struct of function pointers
78 */
79 static struct board_type products[] = {
80 { 0x0040110E, "IDA", &smart1_access },
81 { 0x0140110E, "IDA-2", &smart1_access },
82 { 0x1040110E, "IAES", &smart1_access },
83 { 0x2040110E, "SMART", &smart1_access },
84 { 0x3040110E, "SMART-2/E", &smart2e_access },
85 { 0x40300E11, "SMART-2/P", &smart2_access },
86 { 0x40310E11, "SMART-2SL", &smart2_access },
87 { 0x40320E11, "Smart Array 3200", &smart2_access },
88 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
89 { 0x40340E11, "Smart Array 221", &smart2_access },
90 { 0x40400E11, "Integrated Array", &smart4_access },
91 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
92 { 0x40500E11, "Smart Array 4200", &smart4_access },
93 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
94 { 0x40580E11, "Smart Array 431", &smart4_access },
95 };
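/*
 * Each board_id above is (PCI subsystem device ID << 16) | subsystem vendor
 * ID, e.g. 0x40580E11 is subsystem device 0x4058 from vendor 0x0E11
 * (Compaq), matching the SA431 entry in the PCI table below and the value
 * the EISA probe reads from the board's ID register.
 */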
96
97 /* define the PCI info for the PCI cards this driver can control */
98 static const struct pci_device_id cpqarray_pci_device_id[] =
99 {
100 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
101 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
102 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
103 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
104 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
105 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
106 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
107 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
108 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
109 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
110 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
111 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
112 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
113 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
114 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
115 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
116 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
117 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
118 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
119 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
120 { 0 }
121 };
122
123 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
124
125 static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
126
127 /* Debug... */
128 #define DBG(s) do { s } while(0)
129 /* Debug (general info)... */
130 #define DBGINFO(s) do { } while(0)
131 /* Debug Paranoid... */
132 #define DBGP(s) do { } while(0)
133 /* Debug Extra Paranoid... */
134 #define DBGPX(s) do { } while(0)
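/*
 * Only DBG() actually emits its argument; DBGINFO/DBGP/DBGPX compile to
 * nothing.  To enable the extra debug output, change the empty bodies above
 * to "do { s } while(0)" and rebuild the driver.
 */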
135
136 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
137 static void __iomem *remap_pci_mem(ulong base, ulong size);
138 static int cpqarray_eisa_detect(void);
139 static int pollcomplete(int ctlr);
140 static void getgeometry(int ctlr);
141 static void start_fwbk(int ctlr);
142
143 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
144 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
145
146 static void free_hba(int i);
147 static int alloc_cpqarray_hba(void);
148
149 static int sendcmd(
150 __u8 cmd,
151 int ctlr,
152 void *buff,
153 size_t size,
154 unsigned int blk,
155 unsigned int blkcnt,
156 unsigned int log_unit );
157
158 static int ida_open(struct inode *inode, struct file *filep);
159 static int ida_release(struct inode *inode, struct file *filep);
160 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
161 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
162 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
163
164 static void do_ida_request(struct request_queue *q);
165 static void start_io(ctlr_info_t *h);
166
167 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
168 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
169 static inline void complete_buffers(struct bio *bio, int ok);
170 static inline void complete_command(cmdlist_t *cmd, int timeout);
171
172 static irqreturn_t do_ida_intr(int irq, void *dev_id);
173 static void ida_timer(unsigned long tdata);
174 static int ida_revalidate(struct gendisk *disk);
175 static int revalidate_allvol(ctlr_info_t *host);
176 static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
177
178 #ifdef CONFIG_PROC_FS
179 static void ida_procinit(int i);
180 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
181 #else
182 static void ida_procinit(int i) {}
183 #endif
184
185 static inline drv_info_t *get_drv(struct gendisk *disk)
186 {
187 return disk->private_data;
188 }
189
190 static inline ctlr_info_t *get_host(struct gendisk *disk)
191 {
192 return disk->queue->queuedata;
193 }
194
195
196 static struct block_device_operations ida_fops = {
197 .owner = THIS_MODULE,
198 .open = ida_open,
199 .release = ida_release,
200 .ioctl = ida_ioctl,
201 .getgeo = ida_getgeo,
202 .revalidate_disk= ida_revalidate,
203 };
204
205
206 #ifdef CONFIG_PROC_FS
207
208 static struct proc_dir_entry *proc_array;
209
210 /*
211 * Get us a file in /proc/array that says something about each controller.
212 * Create /proc/array if it doesn't exist yet.
213 */
214 static void __init ida_procinit(int i)
215 {
216 if (proc_array == NULL) {
217 proc_array = proc_mkdir("cpqarray", proc_root_driver);
218 if (!proc_array) return;
219 }
220
221 create_proc_read_entry(hba[i]->devname, 0, proc_array,
222 ida_proc_get_info, hba[i]);
223 }
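/*
 * proc_root_driver is /proc/driver, so each controller shows up as
 * /proc/driver/cpqarray/<devname>, e.g. /proc/driver/cpqarray/ida0.
 */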
224
225 /*
226 * Report information about this controller.
227 */
228 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
229 {
230 off_t pos = 0;
231 off_t len = 0;
232 int size, i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)data;
234 drv_info_t *drv;
235 #ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c;
237 unsigned long flags;
238 #endif
239
240 ctlr = h->ctlr;
241 size = sprintf(buffer, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n"
245 " Memory Address: 0x%08lx\n"
246 " I/O Port: 0x%04x\n"
247 " IRQ: %d\n"
248 " Logical drives: %d\n"
249 " Physical drives: %d\n\n"
250 " Current Q depth: %d\n"
251 " Max Q depth since init: %d\n\n",
252 h->devname,
253 h->product_name,
254 (unsigned long)h->board_id,
255 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
256 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
257 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
258 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit);
260
261 pos += size; len += size;
262
263 size = sprintf(buffer+len, "Logical Drive Info:\n");
264 pos += size; len += size;
265
266 for(i=0; i<h->log_drives; i++) {
267 drv = &h->drv[i];
268 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
269 ctlr, i, drv->blk_size, drv->nr_blks);
270 pos += size; len += size;
271 }
272
273 #ifdef CPQ_PROC_PRINT_QUEUES
274 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
275 size = sprintf(buffer+len, "\nCurrent Queues:\n");
276 pos += size; len += size;
277
278 c = h->reqQ;
279 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
280 if (c) c=c->next;
281 while(c && c != h->reqQ) {
282 size = sprintf(buffer+len, "->%p", c);
283 pos += size; len += size;
284 c=c->next;
285 }
286
287 c = h->cmpQ;
288 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
289 if (c) c=c->next;
290 while(c && c != h->cmpQ) {
291 size = sprintf(buffer+len, "->%p", c);
292 pos += size; len += size;
293 c=c->next;
294 }
295
296 size = sprintf(buffer+len, "\n"); pos += size; len += size;
297 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
298 #endif
299 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
300 h->nr_allocs, h->nr_frees);
301 pos += size; len += size;
302
303 *eof = 1;
304 *start = buffer+offset;
305 len -= offset;
306 if (len>length)
307 len = length;
308 return len;
309 }
310 #endif /* CONFIG_PROC_FS */
311
312 module_param_array(eisa, int, NULL, 0);
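/*
 * When built as a module, the list of EISA I/O addresses can be given as a
 * module parameter (addresses below are only illustrative):
 *
 *	modprobe cpqarray eisa=0x4000,0x5000
 */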
313
314 static void release_io_mem(ctlr_info_t *c)
315 {
316 	/* if the I/O memory region was never reserved, do nothing */
317 if( c->io_mem_addr == 0)
318 return;
319 release_region(c->io_mem_addr, c->io_mem_length);
320 c->io_mem_addr = 0;
321 c->io_mem_length = 0;
322 }
323
324 static void __devexit cpqarray_remove_one(int i)
325 {
326 int j;
327 char buff[4];
328
329 	/* sendcmd will turn off interrupts and send the flush
330 	 * to write all data in the battery-backed cache to disk.
331 	 * No data is returned, but we don't want to pass NULL to sendcmd. */
332 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
333 {
334 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
335 i);
336 }
337 free_irq(hba[i]->intr, hba[i]);
338 iounmap(hba[i]->vaddr);
339 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
340 del_timer(&hba[i]->timer);
341 remove_proc_entry(hba[i]->devname, proc_array);
342 pci_free_consistent(hba[i]->pci_dev,
343 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
344 hba[i]->cmd_pool_dhandle);
345 kfree(hba[i]->cmd_pool_bits);
346 for(j = 0; j < NWD; j++) {
347 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
348 del_gendisk(ida_gendisk[i][j]);
349 put_disk(ida_gendisk[i][j]);
350 }
351 blk_cleanup_queue(hba[i]->queue);
352 release_io_mem(hba[i]);
353 free_hba(i);
354 }
355
356 static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
357 {
358 int i;
359 ctlr_info_t *tmp_ptr;
360
361 if (pci_get_drvdata(pdev) == NULL) {
362 printk( KERN_ERR "cpqarray: Unable to remove device \n");
363 return;
364 }
365
366 tmp_ptr = pci_get_drvdata(pdev);
367 i = tmp_ptr->ctlr;
368 if (hba[i] == NULL) {
369 		printk(KERN_ERR "cpqarray: controller %d appears to have "
370 			"already been removed\n", i);
371 return;
372 }
373 pci_set_drvdata(pdev, NULL);
374
375 cpqarray_remove_one(i);
376 }
377
378 /* removing an instance that was not removed automatically..
379 * must be an eisa card.
380 */
381 static void __devexit cpqarray_remove_one_eisa (int i)
382 {
383 if (hba[i] == NULL) {
384 		printk(KERN_ERR "cpqarray: controller %d appears to have "
385 			"already been removed\n", i);
386 return;
387 }
388 cpqarray_remove_one(i);
389 }
390
391 /* pdev is NULL for eisa */
392 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
393 {
394 struct request_queue *q;
395 int j;
396
397 /*
398 * register block devices
399 * Find disks and fill in structs
400 * Get an interrupt, set the Q depth and get into /proc
401 */
402
403 	/* If this is successful, it should ensure that we are the only */
404 /* instance of the driver */
405 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
406 goto Enomem4;
407 }
408 hba[i]->access.set_intr_mask(hba[i], 0);
409 if (request_irq(hba[i]->intr, do_ida_intr,
410 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
411 {
412 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
413 hba[i]->intr, hba[i]->devname);
414 goto Enomem3;
415 }
416
417 for (j=0; j<NWD; j++) {
418 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
419 if (!ida_gendisk[i][j])
420 goto Enomem2;
421 }
422
423 hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
424 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
425 &(hba[i]->cmd_pool_dhandle));
426 hba[i]->cmd_pool_bits = kmalloc(
427 ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
428 GFP_KERNEL);
429
430 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
431 goto Enomem1;
432
433 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
434 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
435 printk(KERN_INFO "cpqarray: Finding drives on %s",
436 hba[i]->devname);
437
438 spin_lock_init(&hba[i]->lock);
439 q = blk_init_queue(do_ida_request, &hba[i]->lock);
440 if (!q)
441 goto Enomem1;
442
443 hba[i]->queue = q;
444 q->queuedata = hba[i];
445
446 getgeometry(i);
447 start_fwbk(i);
448
449 ida_procinit(i);
450
451 if (pdev)
452 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
453
454 /* This is a hardware imposed limit. */
455 blk_queue_max_hw_segments(q, SG_MAX);
456
457 /* This is a driver limit and could be eliminated. */
458 blk_queue_max_phys_segments(q, SG_MAX);
459
460 init_timer(&hba[i]->timer);
461 hba[i]->timer.expires = jiffies + IDA_TIMER;
462 hba[i]->timer.data = (unsigned long)hba[i];
463 hba[i]->timer.function = ida_timer;
464 add_timer(&hba[i]->timer);
465
466 /* Enable IRQ now that spinlock and rate limit timer are set up */
467 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
468
469 for(j=0; j<NWD; j++) {
470 struct gendisk *disk = ida_gendisk[i][j];
471 drv_info_t *drv = &hba[i]->drv[j];
472 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
473 disk->major = COMPAQ_SMART2_MAJOR + i;
474 disk->first_minor = j<<NWD_SHIFT;
475 disk->fops = &ida_fops;
476 if (j && !drv->nr_blks)
477 continue;
478 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
479 set_capacity(disk, drv->nr_blks);
480 disk->queue = hba[i]->queue;
481 disk->private_data = drv;
482 add_disk(disk);
483 }
484
485 /* done ! */
486 return(i);
487
488 Enomem1:
489 nr_ctlr = i;
490 kfree(hba[i]->cmd_pool_bits);
491 if (hba[i]->cmd_pool)
492 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
493 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
494 Enomem2:
495 while (j--) {
496 put_disk(ida_gendisk[i][j]);
497 ida_gendisk[i][j] = NULL;
498 }
499 free_irq(hba[i]->intr, hba[i]);
500 Enomem3:
501 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
502 Enomem4:
503 if (pdev)
504 pci_set_drvdata(pdev, NULL);
505 release_io_mem(hba[i]);
506 free_hba(i);
507
508 	printk( KERN_ERR "cpqarray: out of memory\n");
509
510 return -1;
511 }
512
513 static int __init cpqarray_init_one( struct pci_dev *pdev,
514 const struct pci_device_id *ent)
515 {
516 int i;
517
518 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
519 " bus %d dev %d func %d\n",
520 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
521 PCI_FUNC(pdev->devfn));
522 i = alloc_cpqarray_hba();
523 if( i < 0 )
524 return (-1);
525 memset(hba[i], 0, sizeof(ctlr_info_t));
526 sprintf(hba[i]->devname, "ida%d", i);
527 hba[i]->ctlr = i;
528 /* Initialize the pdev driver private data */
529 pci_set_drvdata(pdev, hba[i]);
530
531 if (cpqarray_pci_init(hba[i], pdev) != 0) {
532 pci_set_drvdata(pdev, NULL);
533 release_io_mem(hba[i]);
534 free_hba(i);
535 return -1;
536 }
537
538 return (cpqarray_register_ctlr(i, pdev));
539 }
540
541 static struct pci_driver cpqarray_pci_driver = {
542 .name = "cpqarray",
543 .probe = cpqarray_init_one,
544 .remove = __devexit_p(cpqarray_remove_one_pci),
545 .id_table = cpqarray_pci_device_id,
546 };
547
548 /*
549 * This is it. Find all the controllers and register them.
550 * returns the number of block devices registered.
551 */
552 static int __init cpqarray_init(void)
553 {
554 int num_cntlrs_reg = 0;
555 int i;
556 int rc = 0;
557
558 /* detect controllers */
559 printk(DRIVER_NAME "\n");
560
561 rc = pci_register_driver(&cpqarray_pci_driver);
562 if (rc)
563 return rc;
564 cpqarray_eisa_detect();
565
566 for (i=0; i < MAX_CTLR; i++) {
567 if (hba[i] != NULL)
568 num_cntlrs_reg++;
569 }
570
571 return(num_cntlrs_reg);
572 }
573
574 /* Function to find the first free pointer into our hba[] array */
575 /* Returns -1 if no free entries are left. */
576 static int alloc_cpqarray_hba(void)
577 {
578 int i;
579
580 for(i=0; i< MAX_CTLR; i++) {
581 if (hba[i] == NULL) {
582 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
583 if(hba[i]==NULL) {
584 printk(KERN_ERR "cpqarray: out of memory.\n");
585 return (-1);
586 }
587 return (i);
588 }
589 }
590 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
591 " of 8 controllers.\n");
592 return(-1);
593 }
594
595 static void free_hba(int i)
596 {
597 kfree(hba[i]);
598 hba[i]=NULL;
599 }
600
601 /*
602 * Find the IO address of the controller, its IRQ and so forth. Fill
603 * in some basic stuff into the ctlr_info_t structure.
604 */
605 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
606 {
607 ushort vendor_id, device_id, command;
608 unchar cache_line_size, latency_timer;
609 unchar irq, revision;
610 unsigned long addr[6];
611 __u32 board_id;
612
613 int i;
614
615 c->pci_dev = pdev;
616 if (pci_enable_device(pdev)) {
617 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
618 return -1;
619 }
620 vendor_id = pdev->vendor;
621 device_id = pdev->device;
622 irq = pdev->irq;
623
624 for(i=0; i<6; i++)
625 addr[i] = pci_resource_start(pdev, i);
626
627 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
628 {
629 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
630 return -1;
631 }
632
633 pci_read_config_word(pdev, PCI_COMMAND, &command);
634 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
635 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
636 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
637
638 pci_read_config_dword(pdev, 0x2c, &board_id);
639
640 /* check to see if controller has been disabled */
641 if(!(command & 0x02)) {
642 printk(KERN_WARNING
643 "cpqarray: controller appears to be disabled\n");
644 return(-1);
645 }
646
647 DBGINFO(
648 printk("vendor_id = %x\n", vendor_id);
649 printk("device_id = %x\n", device_id);
650 printk("command = %x\n", command);
651 for(i=0; i<6; i++)
652 printk("addr[%d] = %lx\n", i, addr[i]);
653 printk("revision = %x\n", revision);
654 printk("irq = %x\n", irq);
655 printk("cache_line_size = %x\n", cache_line_size);
656 printk("latency_timer = %x\n", latency_timer);
657 printk("board_id = %x\n", board_id);
658 );
659
660 c->intr = irq;
661
662 for(i=0; i<6; i++) {
663 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
664 { /* IO space */
665 c->io_mem_addr = addr[i];
666 c->io_mem_length = pci_resource_end(pdev, i)
667 - pci_resource_start(pdev, i) + 1;
668 if(!request_region( c->io_mem_addr, c->io_mem_length,
669 "cpqarray"))
670 {
671 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
672 c->io_mem_addr = 0;
673 c->io_mem_length = 0;
674 }
675 break;
676 }
677 }
678
679 c->paddr = 0;
680 for(i=0; i<6; i++)
681 if (!(pci_resource_flags(pdev, i) &
682 PCI_BASE_ADDRESS_SPACE_IO)) {
683 c->paddr = pci_resource_start (pdev, i);
684 break;
685 }
686 if (!c->paddr)
687 return -1;
688 c->vaddr = remap_pci_mem(c->paddr, 128);
689 if (!c->vaddr)
690 return -1;
691 c->board_id = board_id;
692
693 for(i=0; i<NR_PRODUCTS; i++) {
694 if (board_id == products[i].board_id) {
695 c->product_name = products[i].product_name;
696 c->access = *(products[i].access);
697 break;
698 }
699 }
700 if (i == NR_PRODUCTS) {
701 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
702 " to access the SMART Array controller %08lx\n",
703 (unsigned long)board_id);
704 return -1;
705 }
706
707 return 0;
708 }
709
710 /*
711 * Map (physical) PCI mem into (virtual) kernel space
712 */
713 static void __iomem *remap_pci_mem(ulong base, ulong size)
714 {
715 ulong page_base = ((ulong) base) & PAGE_MASK;
716 ulong page_offs = ((ulong) base) - page_base;
717 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
718
719 return (page_remapped ? (page_remapped + page_offs) : NULL);
720 }
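/*
 * Illustrative example (assuming 4 KB pages): remap_pci_mem(0xf4000100, 128)
 * ioremaps from the page boundary 0xf4000000 and returns that mapping plus
 * the 0x100 offset, so callers may pass a base that is not page aligned.
 */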
721
722 #ifndef MODULE
723 /*
724 * Config string is a comma separated set of i/o addresses of EISA cards.
725 */
726 static int cpqarray_setup(char *str)
727 {
728 int i, ints[9];
729
730 (void)get_options(str, ARRAY_SIZE(ints), ints);
731
732 for(i=0; i<ints[0] && i<8; i++)
733 eisa[i] = ints[i+1];
734 return 1;
735 }
736
737 __setup("smart2=", cpqarray_setup);
738
739 #endif
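/*
 * With the driver built in, the EISA boards are announced on the kernel
 * command line instead (addresses are illustrative):
 *
 *	smart2=0x4000,0x5000
 */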
740
741 /*
742 * Find an EISA controller's signature. Set up an hba if we find it.
743 */
744 static int __init cpqarray_eisa_detect(void)
745 {
746 int i=0, j;
747 __u32 board_id;
748 int intr;
749 int ctlr;
750 int num_ctlr = 0;
751
752 while(i<8 && eisa[i]) {
753 ctlr = alloc_cpqarray_hba();
754 if(ctlr == -1)
755 break;
756 board_id = inl(eisa[i]+0xC80);
757 for(j=0; j < NR_PRODUCTS; j++)
758 if (board_id == products[j].board_id)
759 break;
760
761 if (j == NR_PRODUCTS) {
762 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
763 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
 764 			free_hba(ctlr);
			i++;
			continue;
765 }
766
767 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
768 hba[ctlr]->io_mem_addr = eisa[i];
769 hba[ctlr]->io_mem_length = 0x7FF;
770 if(!request_region(hba[ctlr]->io_mem_addr,
771 hba[ctlr]->io_mem_length,
772 "cpqarray"))
773 {
774 printk(KERN_WARNING "cpqarray: I/O range already in "
775 "use addr = %lx length = %ld\n",
776 hba[ctlr]->io_mem_addr,
777 hba[ctlr]->io_mem_length);
778 free_hba(ctlr);
 779 			i++;
			continue;
780 }
781
782 /*
783 * Read the config register to find our interrupt
784 */
785 intr = inb(eisa[i]+0xCC0) >> 4;
786 if (intr & 1) intr = 11;
787 else if (intr & 2) intr = 10;
788 else if (intr & 4) intr = 14;
789 else if (intr & 8) intr = 15;
790
791 hba[ctlr]->intr = intr;
792 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
793 hba[ctlr]->product_name = products[j].product_name;
794 hba[ctlr]->access = *(products[j].access);
795 hba[ctlr]->ctlr = ctlr;
796 hba[ctlr]->board_id = board_id;
797 hba[ctlr]->pci_dev = NULL; /* not PCI */
798
799 DBGINFO(
800 printk("i = %d, j = %d\n", i, j);
801 printk("irq = %x\n", intr);
802 printk("product name = %s\n", products[j].product_name);
803 printk("board_id = %x\n", board_id);
804 );
805
806 num_ctlr++;
807 i++;
808
809 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
810 printk(KERN_WARNING
811 "cpqarray: Can't register EISA controller %d\n",
812 ctlr);
813
814 }
815
816 return num_ctlr;
817 }
818
819 /*
820 * Open. Make sure the device is really there.
821 */
822 static int ida_open(struct inode *inode, struct file *filep)
823 {
824 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
825 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
826
827 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
828 /*
829 * Root is allowed to open raw volume zero even if it's not configured
830 * so array config can still work. I don't think I really like this,
831 	 * but I'm already using way too many device nodes to claim another one
832 * for "raw controller".
833 */
834 if (!drv->nr_blks) {
835 if (!capable(CAP_SYS_RAWIO))
836 return -ENXIO;
837 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
838 return -ENXIO;
839 }
840 host->usage_count++;
841 return 0;
842 }
843
844 /*
845 * Close. Sync first.
846 */
847 static int ida_release(struct inode *inode, struct file *filep)
848 {
849 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
850 host->usage_count--;
851 return 0;
852 }
853
854 /*
855 * Enqueuing and dequeuing functions for cmdlists.
856 */
857 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
858 {
859 if (*Qptr == NULL) {
860 *Qptr = c;
861 c->next = c->prev = c;
862 } else {
863 c->prev = (*Qptr)->prev;
864 c->next = (*Qptr);
865 (*Qptr)->prev->next = c;
866 (*Qptr)->prev = c;
867 }
868 }
869
870 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
871 {
872 if (c && c->next != c) {
873 if (*Qptr == c) *Qptr = c->next;
874 c->prev->next = c->next;
875 c->next->prev = c->prev;
876 } else {
877 *Qptr = NULL;
878 }
879 return c;
880 }
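/*
 * reqQ and cmpQ are circular doubly-linked lists threaded through the
 * next/prev pointers of cmdlist_t, with *Qptr pointing at the head.
 * addQ() links the new command in just before the head (i.e. at the tail),
 * and removeQ() unlinks an element, clearing *Qptr once the list is empty.
 */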
881
882 /*
883 * Get a request and submit it to the controller.
884 * This routine needs to grab all the requests it possibly can from the
885 * req Q and submit them. Interrupts are off (and need to be off) when you
886 * are in here (either via the dummy do_ida_request functions or by being
887  * called from the interrupt handler).
888 */
889 static void do_ida_request(struct request_queue *q)
890 {
891 ctlr_info_t *h = q->queuedata;
892 cmdlist_t *c;
893 struct request *creq;
894 struct scatterlist tmp_sg[SG_MAX];
895 int i, dir, seg;
896
897 if (blk_queue_plugged(q))
898 goto startio;
899
900 queue_next:
901 creq = elv_next_request(q);
902 if (!creq)
903 goto startio;
904
905 BUG_ON(creq->nr_phys_segments > SG_MAX);
906
907 if ((c = cmd_alloc(h,1)) == NULL)
908 goto startio;
909
910 blkdev_dequeue_request(creq);
911
912 c->ctlr = h->ctlr;
913 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
914 c->hdr.size = sizeof(rblk_t) >> 2;
915 c->size += sizeof(rblk_t);
916
917 c->req.hdr.blk = creq->sector;
918 c->rq = creq;
919 DBGPX(
920 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
921 );
922 seg = blk_rq_map_sg(q, creq, tmp_sg);
923
924 /* Now do all the DMA Mappings */
925 if (rq_data_dir(creq) == READ)
926 dir = PCI_DMA_FROMDEVICE;
927 else
928 dir = PCI_DMA_TODEVICE;
929 for( i=0; i < seg; i++)
930 {
931 c->req.sg[i].size = tmp_sg[i].length;
932 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
933 tmp_sg[i].page,
934 tmp_sg[i].offset,
935 tmp_sg[i].length, dir);
936 }
937 DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
938 c->req.hdr.sg_cnt = seg;
939 c->req.hdr.blk_cnt = creq->nr_sectors;
940 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
941 c->type = CMD_RWREQ;
942
943 /* Put the request on the tail of the request queue */
944 addQ(&h->reqQ, c);
945 h->Qdepth++;
946 if (h->Qdepth > h->maxQsinceinit)
947 h->maxQsinceinit = h->Qdepth;
948
949 goto queue_next;
950
951 startio:
952 start_io(h);
953 }
954
955 /*
956 * start_io submits everything on a controller's request queue
957 * and moves it to the completion queue.
958 *
959 * Interrupts had better be off if you're in here
960 */
961 static void start_io(ctlr_info_t *h)
962 {
963 cmdlist_t *c;
964
965 while((c = h->reqQ) != NULL) {
966 /* Can't do anything if we're busy */
967 if (h->access.fifo_full(h) == 0)
968 return;
969
970 /* Get the first entry from the request Q */
971 removeQ(&h->reqQ, c);
972 h->Qdepth--;
973
974 /* Tell the controller to do our bidding */
975 h->access.submit_command(h, c);
976
977 /* Get onto the completion Q */
978 addQ(&h->cmpQ, c);
979 }
980 }
981
982 static inline void complete_buffers(struct bio *bio, int ok)
983 {
984 struct bio *xbh;
985 while(bio) {
986 int nr_sectors = bio_sectors(bio);
987
988 xbh = bio->bi_next;
989 bio->bi_next = NULL;
990
991 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
992
993 bio = xbh;
994 }
995 }
996 /*
997  * Mark all buffers that this cmd was responsible for as completed.
998 */
999 static inline void complete_command(cmdlist_t *cmd, int timeout)
1000 {
1001 struct request *rq = cmd->rq;
1002 int ok=1;
1003 int i, ddir;
1004
1005 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1006 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1007 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1008 cmd->ctlr, cmd->hdr.unit);
1009 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1010 }
1011 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1012 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1013 cmd->ctlr, cmd->hdr.unit);
1014 ok = 0;
1015 }
1016 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1017 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1018 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1019 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1020 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1021 ok = 0;
1022 }
1023 if (timeout) ok = 0;
1024 /* unmap the DMA mapping for all the scatter gather elements */
1025 if (cmd->req.hdr.cmd == IDA_READ)
1026 ddir = PCI_DMA_FROMDEVICE;
1027 else
1028 ddir = PCI_DMA_TODEVICE;
1029 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1030 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1031 cmd->req.sg[i].size, ddir);
1032
1033 complete_buffers(rq->bio, ok);
1034
1035 if (blk_fs_request(rq)) {
1036 const int rw = rq_data_dir(rq);
1037
1038 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1039 }
1040
1041 add_disk_randomness(rq->rq_disk);
1042
1043 DBGPX(printk("Done with %p\n", rq););
1044 end_that_request_last(rq, ok ? 1 : -EIO);
1045 }
1046
1047 /*
1048 * The controller will interrupt us upon completion of commands.
1049 * Find the command on the completion queue, remove it, tell the OS and
1050 * try to queue up more IO
1051 */
1052 static irqreturn_t do_ida_intr(int irq, void *dev_id)
1053 {
1054 ctlr_info_t *h = dev_id;
1055 cmdlist_t *c;
1056 unsigned long istat;
1057 unsigned long flags;
1058 __u32 a,a1;
1059
1060 istat = h->access.intr_pending(h);
1061 /* Is this interrupt for us? */
1062 if (istat == 0)
1063 return IRQ_NONE;
1064
1065 /*
1066 * If there are completed commands in the completion queue,
1067 * we had better do something about it.
1068 */
1069 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1070 if (istat & FIFO_NOT_EMPTY) {
1071 while((a = h->access.command_completed(h))) {
1072 a1 = a; a &= ~3;
1073 if ((c = h->cmpQ) == NULL)
1074 {
1075 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1076 continue;
1077 }
1078 while(c->busaddr != a) {
1079 c = c->next;
1080 if (c == h->cmpQ)
1081 break;
1082 }
1083 /*
1084 * If we've found the command, take it off the
1085 * completion Q and free it
1086 */
1087 if (c->busaddr == a) {
1088 removeQ(&h->cmpQ, c);
1089 /* Check for invalid command.
1090 * Controller returns command error,
1091 * But rcode = 0.
1092 */
1093
1094 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1095 {
1096 c->req.hdr.rcode = RCODE_INVREQ;
1097 }
1098 if (c->type == CMD_RWREQ) {
1099 complete_command(c, 0);
1100 cmd_free(h, c, 1);
1101 } else if (c->type == CMD_IOCTL_PEND) {
1102 c->type = CMD_IOCTL_DONE;
1103 }
1104 continue;
1105 }
1106 }
1107 }
1108
1109 /*
1110 * See if we can queue up some more IO
1111 */
1112 do_ida_request(h->queue);
1113 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1114 return IRQ_HANDLED;
1115 }
1116
1117 /*
1118  * This timer was for timing out requests that hadn't completed after
1119 * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
1120 * reset a flags structure so we don't flood the user with
1121 * "Non-Fatal error" messages.
1122 */
1123 static void ida_timer(unsigned long tdata)
1124 {
1125 ctlr_info_t *h = (ctlr_info_t*)tdata;
1126
1127 h->timer.expires = jiffies + IDA_TIMER;
1128 add_timer(&h->timer);
1129 h->misc_tflags = 0;
1130 }
1131
1132 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1133 {
1134 drv_info_t *drv = get_drv(bdev->bd_disk);
1135
1136 if (drv->cylinders) {
1137 geo->heads = drv->heads;
1138 geo->sectors = drv->sectors;
1139 geo->cylinders = drv->cylinders;
1140 } else {
1141 geo->heads = 0xff;
1142 geo->sectors = 0x3f;
1143 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1144 }
1145
1146 return 0;
1147 }
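/*
 * When the controller reported no geometry we fake one of 255 heads and
 * 63 sectors per track, i.e. cylinders = nr_blks / 16065.
 */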
1148
1149 /*
1150 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1151 * setting readahead and submitting commands from userspace to the controller.
1152 */
1153 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1154 {
1155 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1156 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1157 int error;
1158 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1159 ida_ioctl_t *my_io;
1160
1161 switch(cmd) {
1162 case IDAGETDRVINFO:
1163 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1164 return -EFAULT;
1165 return 0;
1166 case IDAPASSTHRU:
1167 if (!capable(CAP_SYS_RAWIO))
1168 return -EPERM;
1169 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1170 if (!my_io)
1171 return -ENOMEM;
1172 error = -EFAULT;
1173 if (copy_from_user(my_io, io, sizeof(*my_io)))
1174 goto out_passthru;
1175 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1176 if (error)
1177 goto out_passthru;
1178 error = -EFAULT;
1179 if (copy_to_user(io, my_io, sizeof(*my_io)))
1180 goto out_passthru;
1181 error = 0;
1182 out_passthru:
1183 kfree(my_io);
1184 return error;
1185 case IDAGETCTLRSIG:
1186 if (!arg) return -EINVAL;
1187 put_user(host->ctlr_sig, (int __user *)arg);
1188 return 0;
1189 case IDAREVALIDATEVOLS:
1190 if (iminor(inode) != 0)
1191 return -ENXIO;
1192 return revalidate_allvol(host);
1193 case IDADRIVERVERSION:
1194 if (!arg) return -EINVAL;
1195 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1196 return 0;
1197 case IDAGETPCIINFO:
1198 {
1199
1200 ida_pci_info_struct pciinfo;
1201
1202 if (!arg) return -EINVAL;
1203 pciinfo.bus = host->pci_dev->bus->number;
1204 pciinfo.dev_fn = host->pci_dev->devfn;
1205 pciinfo.board_id = host->board_id;
1206 if(copy_to_user((void __user *) arg, &pciinfo,
1207 sizeof( ida_pci_info_struct)))
1208 return -EFAULT;
1209 return(0);
1210 }
1211
1212 default:
1213 return -EINVAL;
1214 }
1215
1216 }
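/*
 * Userspace reaches these ioctls through the block nodes named after
 * disk_name ("ida/c<ctlr>d<unit>").  A minimal sketch of fetching the drive
 * info (the device path and error handling are illustrative only):
 *
 *	ida_ioctl_t io;
 *	int fd = open("/dev/ida/c0d0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, IDAGETDRVINFO, &io) == 0)
 *		printf("blksz=%d nr_blks=%d\n",
 *			io.c.drv.blk_size, io.c.drv.nr_blks);
 */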
1217 /*
1218 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1219 * The command block (io) has already been copied to kernel space for us,
1220 * however, any elements in the sglist need to be copied to kernel space
1221 * or copied back to userspace.
1222 *
1223 * Only root may perform a controller passthru command, however I'm not doing
1224 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1225 * putting a 64M buffer in the sglist is probably a *bad* idea.
1226 */
1227 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1228 {
1229 int ctlr = h->ctlr;
1230 cmdlist_t *c;
1231 void *p = NULL;
1232 unsigned long flags;
1233 int error;
1234
1235 if ((c = cmd_alloc(h, 0)) == NULL)
1236 return -ENOMEM;
1237 c->ctlr = ctlr;
1238 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1239 c->hdr.size = sizeof(rblk_t) >> 2;
1240 c->size += sizeof(rblk_t);
1241
1242 c->req.hdr.cmd = io->cmd;
1243 c->req.hdr.blk = io->blk;
1244 c->req.hdr.blk_cnt = io->blk_cnt;
1245 c->type = CMD_IOCTL_PEND;
1246
1247 /* Pre submit processing */
1248 switch(io->cmd) {
1249 case PASSTHRU_A:
1250 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1251 if (!p)
1252 {
1253 error = -ENOMEM;
1254 cmd_free(h, c, 0);
1255 return(error);
1256 }
1257 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1258 kfree(p);
1259 cmd_free(h, c, 0);
1260 return -EFAULT;
1261 }
1262 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1263 sizeof(ida_ioctl_t),
1264 PCI_DMA_BIDIRECTIONAL);
1265 c->req.sg[0].size = io->sg[0].size;
1266 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1267 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1268 c->req.hdr.sg_cnt = 1;
1269 break;
1270 case IDA_READ:
1271 case READ_FLASH_ROM:
1272 case SENSE_CONTROLLER_PERFORMANCE:
1273 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1274 if (!p)
1275 {
1276 error = -ENOMEM;
1277 cmd_free(h, c, 0);
1278 return(error);
1279 }
1280
1281 c->req.sg[0].size = io->sg[0].size;
1282 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1283 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1284 c->req.hdr.sg_cnt = 1;
1285 break;
1286 case IDA_WRITE:
1287 case IDA_WRITE_MEDIA:
1288 case DIAG_PASS_THRU:
1289 case COLLECT_BUFFER:
1290 case WRITE_FLASH_ROM:
1291 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1292 if (!p)
1293 {
1294 error = -ENOMEM;
1295 cmd_free(h, c, 0);
1296 return(error);
1297 }
1298 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1299 kfree(p);
1300 cmd_free(h, c, 0);
1301 return -EFAULT;
1302 }
1303 c->req.sg[0].size = io->sg[0].size;
1304 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1305 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1306 c->req.hdr.sg_cnt = 1;
1307 break;
1308 default:
1309 c->req.sg[0].size = sizeof(io->c);
1310 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1311 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1312 c->req.hdr.sg_cnt = 1;
1313 }
1314
1315 /* Put the request on the tail of the request queue */
1316 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1317 addQ(&h->reqQ, c);
1318 h->Qdepth++;
1319 start_io(h);
1320 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1321
1322 /* Wait for completion */
1323 while(c->type != CMD_IOCTL_DONE)
1324 schedule();
1325
1326 /* Unmap the DMA */
1327 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1328 PCI_DMA_BIDIRECTIONAL);
1329 /* Post submit processing */
1330 switch(io->cmd) {
1331 case PASSTHRU_A:
1332 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1333 sizeof(ida_ioctl_t),
1334 PCI_DMA_BIDIRECTIONAL);
1335 case IDA_READ:
1336 case DIAG_PASS_THRU:
1337 case SENSE_CONTROLLER_PERFORMANCE:
1338 case READ_FLASH_ROM:
1339 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1340 kfree(p);
1341 return -EFAULT;
1342 }
1343 /* fall through and free p */
1344 case IDA_WRITE:
1345 case IDA_WRITE_MEDIA:
1346 case COLLECT_BUFFER:
1347 case WRITE_FLASH_ROM:
1348 kfree(p);
1349 break;
1350 default:;
1351 /* Nothing to do */
1352 }
1353
1354 io->rcode = c->req.hdr.rcode;
1355 cmd_free(h, c, 0);
1356 return(0);
1357 }
1358
1359 /*
1360 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1361  * scheme to suballocate them to the driver.  Operations that are not time
1362  * critical pass get_from_pool == 0 to get a freshly allocated, DMA-coherent
1363  * command instead of taking one from the pre-allocated pool.
1364 */
1365 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1366 {
1367 cmdlist_t * c;
1368 int i;
1369 dma_addr_t cmd_dhandle;
1370
1371 if (!get_from_pool) {
1372 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1373 sizeof(cmdlist_t), &cmd_dhandle);
1374 if(c==NULL)
1375 return NULL;
1376 } else {
1377 do {
1378 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1379 if (i == NR_CMDS)
1380 return NULL;
1381 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1382 c = h->cmd_pool + i;
1383 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1384 h->nr_allocs++;
1385 }
1386
1387 memset(c, 0, sizeof(cmdlist_t));
1388 c->busaddr = cmd_dhandle;
1389 return c;
1390 }
1391
1392 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1393 {
1394 int i;
1395
1396 if (!got_from_pool) {
1397 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1398 c->busaddr);
1399 } else {
1400 i = c - h->cmd_pool;
1401 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1402 h->nr_frees++;
1403 }
1404 }
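/*
 * Pool commands are handed out by index: bit i in cmd_pool_bits marks slot i
 * as in use, the command itself lives at cmd_pool + i and its bus address is
 * cmd_pool_dhandle + i * sizeof(cmdlist_t).  cmd_free() clears that same bit
 * again (or frees the standalone coherent buffer for non-pool commands).
 */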
1405
1406 /***********************************************************************
1407 name: sendcmd
1408 Send a command to an IDA using the memory mapped FIFO interface
1409 and wait for it to complete.
1410 This routine should only be called at init time.
1411 ***********************************************************************/
1412 static int sendcmd(
1413 __u8 cmd,
1414 int ctlr,
1415 void *buff,
1416 size_t size,
1417 unsigned int blk,
1418 unsigned int blkcnt,
1419 unsigned int log_unit )
1420 {
1421 cmdlist_t *c;
1422 int complete;
1423 unsigned long temp;
1424 unsigned long i;
1425 ctlr_info_t *info_p = hba[ctlr];
1426
1427 c = cmd_alloc(info_p, 1);
1428 if(!c)
1429 return IO_ERROR;
1430 c->ctlr = ctlr;
1431 c->hdr.unit = log_unit;
1432 c->hdr.prio = 0;
1433 c->hdr.size = sizeof(rblk_t) >> 2;
1434 c->size += sizeof(rblk_t);
1435
1436 /* The request information. */
1437 c->req.hdr.next = 0;
1438 c->req.hdr.rcode = 0;
1439 c->req.bp = 0;
1440 c->req.hdr.sg_cnt = 1;
1441 c->req.hdr.reserved = 0;
1442
1443 if (size == 0)
1444 c->req.sg[0].size = 512;
1445 else
1446 c->req.sg[0].size = size;
1447
1448 c->req.hdr.blk = blk;
1449 c->req.hdr.blk_cnt = blkcnt;
1450 c->req.hdr.cmd = (unsigned char) cmd;
1451 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1452 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1453 /*
1454 * Disable interrupt
1455 */
1456 info_p->access.set_intr_mask(info_p, 0);
1457 /* Make sure there is room in the command FIFO */
1458 /* Actually it should be completely empty at this time. */
1459 for (i = 200000; i > 0; i--) {
1460 temp = info_p->access.fifo_full(info_p);
1461 if (temp != 0) {
1462 break;
1463 }
1464 udelay(10);
1465 DBG(
1466 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1467 " waiting!\n", ctlr);
1468 );
1469 }
1470 /*
1471 * Send the cmd
1472 */
1473 info_p->access.submit_command(info_p, c);
1474 complete = pollcomplete(ctlr);
1475
1476 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1477 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1478 if (complete != 1) {
1479 if (complete != c->busaddr) {
1480 printk( KERN_WARNING
1481 "cpqarray ida%d: idaSendPciCmd "
1482 "Invalid command list address returned! (%08lx)\n",
1483 ctlr, (unsigned long)complete);
1484 cmd_free(info_p, c, 1);
1485 return (IO_ERROR);
1486 }
1487 } else {
1488 printk( KERN_WARNING
1489 			"cpqarray ida%d: idaSendPciCmd timed out, "
1490 "No command list address returned!\n",
1491 ctlr);
1492 cmd_free(info_p, c, 1);
1493 return (IO_ERROR);
1494 }
1495
1496 if (c->req.hdr.rcode & 0x00FE) {
1497 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1498 printk( KERN_WARNING
1499 "cpqarray ida%d: idaSendPciCmd, error: "
1500 "Controller failed at init time "
1501 "cmd: 0x%x, return code = 0x%x\n",
1502 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1503
1504 cmd_free(info_p, c, 1);
1505 return (IO_ERROR);
1506 }
1507 }
1508 cmd_free(info_p, c, 1);
1509 return (IO_OK);
1510 }
1511
1512 /*
1513 * revalidate_allvol is for online array config utilities. After a
1514 * utility reconfigures the drives in the array, it can use this function
1515 * (through an ioctl) to make the driver zap any previous disk structs for
1516 * that controller and get new ones.
1517 *
1518 * Right now I'm using the getgeometry() function to do this, but this
1519 * function should probably be finer grained and allow you to revalidate one
1520  * particular logical volume (instead of all of them on a particular
1521 * controller).
1522 */
1523 static int revalidate_allvol(ctlr_info_t *host)
1524 {
1525 int ctlr = host->ctlr;
1526 int i;
1527 unsigned long flags;
1528
1529 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1530 if (host->usage_count > 1) {
1531 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1532 printk(KERN_WARNING "cpqarray: Device busy for volume"
1533 " revalidation (usage=%d)\n", host->usage_count);
1534 return -EBUSY;
1535 }
1536 host->usage_count++;
1537 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1538
1539 /*
1540 * Set the partition and block size structures for all volumes
1541 * on this controller to zero. We will reread all of this data
1542 */
1543 set_capacity(ida_gendisk[ctlr][0], 0);
1544 for (i = 1; i < NWD; i++) {
1545 struct gendisk *disk = ida_gendisk[ctlr][i];
1546 if (disk->flags & GENHD_FL_UP)
1547 del_gendisk(disk);
1548 }
1549 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1550
1551 /*
1552 * Tell the array controller not to give us any interrupts while
1553 * we check the new geometry. Then turn interrupts back on when
1554 * we're done.
1555 */
1556 host->access.set_intr_mask(host, 0);
1557 getgeometry(ctlr);
1558 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1559
1560 for(i=0; i<NWD; i++) {
1561 struct gendisk *disk = ida_gendisk[ctlr][i];
1562 drv_info_t *drv = &host->drv[i];
1563 if (i && !drv->nr_blks)
1564 continue;
1565 blk_queue_hardsect_size(host->queue, drv->blk_size);
1566 set_capacity(disk, drv->nr_blks);
1567 disk->queue = host->queue;
1568 disk->private_data = drv;
1569 if (i)
1570 add_disk(disk);
1571 }
1572
1573 host->usage_count--;
1574 return 0;
1575 }
1576
1577 static int ida_revalidate(struct gendisk *disk)
1578 {
1579 drv_info_t *drv = disk->private_data;
1580 set_capacity(disk, drv->nr_blks);
1581 return 0;
1582 }
1583
1584 /********************************************************************
1585 name: pollcomplete
1586 Wait polling for a command to complete.
1587 The memory mapped FIFO is polled for the completion.
1588 Used only at init time, interrupts disabled.
1589 ********************************************************************/
1590 static int pollcomplete(int ctlr)
1591 {
1592 int done;
1593 int i;
1594
1595 /* Wait (up to 2 seconds) for a command to complete */
1596
1597 for (i = 200000; i > 0; i--) {
1598 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1599 if (done == 0) {
1600 udelay(10); /* a short fixed delay */
1601 } else
1602 return (done);
1603 }
1604 /* Invalid address to tell caller we ran out of time */
1605 return 1;
1606 }
1607 /*****************************************************************
1608 start_fwbk
1609 	Starts the controller firmware's background processing.
1610 Currently only the Integrated Raid controller needs this done.
1611 If the PCI mem address registers are written to after this,
1612 data corruption may occur
1613 *****************************************************************/
1614 static void start_fwbk(int ctlr)
1615 {
1616 id_ctlr_t *id_ctlr_buf;
1617 int ret_code;
1618
1619 if( (hba[ctlr]->board_id != 0x40400E11)
1620 && (hba[ctlr]->board_id != 0x40480E11) )
1621
1622 	/* Not an Integrated Raid, so there is nothing for us to do */
1623 return;
1624 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1625 " processing\n");
1626 /* Command does not return anything, but idasend command needs a
1627 buffer */
1628 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1629 if(id_ctlr_buf==NULL)
1630 {
1631 printk(KERN_WARNING "cpqarray: Out of memory. "
1632 "Unable to start background processing.\n");
1633 return;
1634 }
1635 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1636 id_ctlr_buf, 0, 0, 0, 0);
1637 if(ret_code != IO_OK)
1638 printk(KERN_WARNING "cpqarray: Unable to start"
1639 " background processing\n");
1640
1641 kfree(id_ctlr_buf);
1642 }
1643 /*****************************************************************
1644 getgeometry
1645 Get ida logical volume geometry from the controller
1646 	Get ida logical volume geometry from the controller.
1647 	This is a large bit of code which once existed in two flavors.
1648 *****************************************************************/
1649 static void getgeometry(int ctlr)
1650 {
1651 id_log_drv_t *id_ldrive;
1652 id_ctlr_t *id_ctlr_buf;
1653 sense_log_drv_stat_t *id_lstatus_buf;
1654 config_t *sense_config_buf;
1655 unsigned int log_unit, log_index;
1656 int ret_code, size;
1657 drv_info_t *drv;
1658 ctlr_info_t *info_p = hba[ctlr];
1659 int i;
1660
1661 info_p->log_drv_map = 0;
1662
1663 id_ldrive = kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1664 if(id_ldrive == NULL)
1665 {
1666 printk( KERN_ERR "cpqarray: out of memory.\n");
1667 return;
1668 }
1669
1670 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1671 if(id_ctlr_buf == NULL)
1672 {
1673 kfree(id_ldrive);
1674 printk( KERN_ERR "cpqarray: out of memory.\n");
1675 return;
1676 }
1677
1678 id_lstatus_buf = kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1679 if(id_lstatus_buf == NULL)
1680 {
1681 kfree(id_ctlr_buf);
1682 kfree(id_ldrive);
1683 printk( KERN_ERR "cpqarray: out of memory.\n");
1684 return;
1685 }
1686
1687 sense_config_buf = kmalloc(sizeof(config_t), GFP_KERNEL);
1688 if(sense_config_buf == NULL)
1689 {
1690 kfree(id_lstatus_buf);
1691 kfree(id_ctlr_buf);
1692 kfree(id_ldrive);
1693 printk( KERN_ERR "cpqarray: out of memory.\n");
1694 return;
1695 }
1696
1697 memset(id_ldrive, 0, sizeof(id_log_drv_t));
1698 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
1699 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
1700 memset(sense_config_buf, 0, sizeof(config_t));
1701
1702 info_p->phys_drives = 0;
1703 info_p->log_drv_map = 0;
1704 info_p->drv_assign_map = 0;
1705 info_p->drv_spare_map = 0;
1706 info_p->mp_failed_drv_map = 0; /* only initialized here */
1707 	/* Get the controller's info */
1708 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1709 if (ret_code == IO_ERROR) {
1710 /*
1711 * If can't get controller info, set the logical drive map to 0,
1712 * so the idastubopen will fail on all logical drives
1713 * on the controller.
1714 */
1715 /* Free all the buffers and return */
1716 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1717 kfree(sense_config_buf);
1718 kfree(id_lstatus_buf);
1719 kfree(id_ctlr_buf);
1720 kfree(id_ldrive);
1721 return;
1722 }
1723
1724 info_p->log_drives = id_ctlr_buf->nr_drvs;
1725 for(i=0;i<4;i++)
1726 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1727 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1728
1729 printk(" (%s)\n", info_p->product_name);
1730 /*
1731 * Initialize logical drive map to zero
1732 */
1733 log_index = 0;
1734 /*
1735 * Get drive geometry for all logical drives
1736 */
1737 if (id_ctlr_buf->nr_drvs > 16)
1738 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1739 			"16 logical drives per controller; "
1740 			"additional drives will not be "
1741 			"detected\n", ctlr);
1742
1743 for (log_unit = 0;
1744 (log_index < id_ctlr_buf->nr_drvs)
1745 && (log_unit < NWD);
1746 log_unit++) {
1747 size = sizeof(sense_log_drv_stat_t);
1748
1749 /*
1750 Send "Identify logical drive status" cmd
1751 */
1752 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1753 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1754 if (ret_code == IO_ERROR) {
1755 /*
1756 If can't get logical drive status, set
1757 the logical drive map to 0, so the
1758 idastubopen will fail for all logical drives
1759 on the controller.
1760 */
1761 info_p->log_drv_map = 0;
1762 printk( KERN_WARNING
1763 "cpqarray ida%d: idaGetGeometry - Controller"
1764 " failed to report status of logical drive %d\n"
1765 "Access to this controller has been disabled\n",
1766 ctlr, log_unit);
1767 /* Free all the buffers and return */
1768 kfree(sense_config_buf);
1769 kfree(id_lstatus_buf);
1770 kfree(id_ctlr_buf);
1771 kfree(id_ldrive);
1772 return;
1773 }
1774 /*
1775 Make sure the logical drive is configured
1776 */
1777 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1778 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1779 sizeof(id_log_drv_t), 0, 0, log_unit);
1780 /*
1781 If error, the bit for this
1782 logical drive won't be set and
1783 idastubopen will return error.
1784 */
1785 if (ret_code != IO_ERROR) {
1786 drv = &info_p->drv[log_unit];
1787 drv->blk_size = id_ldrive->blk_size;
1788 drv->nr_blks = id_ldrive->nr_blks;
1789 drv->cylinders = id_ldrive->drv.cyl;
1790 drv->heads = id_ldrive->drv.heads;
1791 drv->sectors = id_ldrive->drv.sect_per_track;
1792 info_p->log_drv_map |= (1 << log_unit);
1793
1794 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1795 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1796 ret_code = sendcmd(SENSE_CONFIG,
1797 ctlr, sense_config_buf,
1798 sizeof(config_t), 0, 0, log_unit);
1799 if (ret_code == IO_ERROR) {
1800 info_p->log_drv_map = 0;
1801 /* Free all the buffers and return */
1802 printk(KERN_ERR "cpqarray: error sending sense config\n");
1803 kfree(sense_config_buf);
1804 kfree(id_lstatus_buf);
1805 kfree(id_ctlr_buf);
1806 kfree(id_ldrive);
1807 return;
1808
1809 }
1810
1811 info_p->phys_drives =
1812 sense_config_buf->ctlr_phys_drv;
1813 info_p->drv_assign_map
1814 |= sense_config_buf->drv_asgn_map;
1815 info_p->drv_assign_map
1816 |= sense_config_buf->spare_asgn_map;
1817 info_p->drv_spare_map
1818 |= sense_config_buf->spare_asgn_map;
1819 } /* end of if no error on id_ldrive */
1820 log_index = log_index + 1;
1821 } /* end of if logical drive configured */
1822 } /* end of for log_unit */
1823 kfree(sense_config_buf);
1824 kfree(id_ldrive);
1825 kfree(id_lstatus_buf);
1826 kfree(id_ctlr_buf);
1827 return;
1828
1829 }
1830
1831 static void __exit cpqarray_exit(void)
1832 {
1833 int i;
1834
1835 pci_unregister_driver(&cpqarray_pci_driver);
1836
1837 /* Double check that all controller entries have been removed */
1838 for(i=0; i<MAX_CTLR; i++) {
1839 if (hba[i] != NULL) {
1840 printk(KERN_WARNING "cpqarray: Removing EISA "
1841 "controller %d\n", i);
1842 cpqarray_remove_one_eisa(i);
1843 }
1844 }
1845
1846 remove_proc_entry("cpqarray", proc_root_driver);
1847 }
1848
1849 module_init(cpqarray_init)
1850 module_exit(cpqarray_exit)