1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/bio.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include <linux/major.h>
31#include <linux/fs.h>
32#include <linux/blkpg.h>
33#include <linux/timer.h>
34#include <linux/proc_fs.h>
 35#include <linux/seq_file.h>
36#include <linux/init.h>
37#include <linux/hdreg.h>
 38#include <linux/smp_lock.h>
39#include <linux/spinlock.h>
40#include <linux/blkdev.h>
41#include <linux/genhd.h>
 42#include <linux/scatterlist.h>
43#include <asm/uaccess.h>
44#include <asm/io.h>
45
46
47#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
48
49#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
50#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
51
52/* Embedded module documentation macros - see modules.h */
53/* Original author Chris Frantz - Compaq Computer Corporation */
54MODULE_AUTHOR("Compaq Computer Corporation");
55MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
56MODULE_LICENSE("GPL");
57
58#include "cpqarray.h"
59#include "ida_cmd.h"
60#include "smart1,2.h"
61#include "ida_ioctl.h"
62
63#define READ_AHEAD 128
64#define NR_CMDS 128 /* This could probably go as high as ~400 */
65
66#define MAX_CTLR 8
67#define CTLR_SHIFT 8
68
69#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
70
71static int nr_ctlr;
72static ctlr_info_t *hba[MAX_CTLR];
73
74static int eisa[8];
75
 76#define NR_PRODUCTS ARRAY_SIZE(products)
77
78/* board_id = Subsystem Device ID & Vendor ID
79 * product = Marketing Name for the board
 80 * access = Address of the struct of function pointers
81 */
82static struct board_type products[] = {
83 { 0x0040110E, "IDA", &smart1_access },
84 { 0x0140110E, "IDA-2", &smart1_access },
85 { 0x1040110E, "IAES", &smart1_access },
86 { 0x2040110E, "SMART", &smart1_access },
87 { 0x3040110E, "SMART-2/E", &smart2e_access },
88 { 0x40300E11, "SMART-2/P", &smart2_access },
89 { 0x40310E11, "SMART-2SL", &smart2_access },
90 { 0x40320E11, "Smart Array 3200", &smart2_access },
91 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
92 { 0x40340E11, "Smart Array 221", &smart2_access },
93 { 0x40400E11, "Integrated Array", &smart4_access },
94 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
95 { 0x40500E11, "Smart Array 4200", &smart4_access },
96 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
97 { 0x40580E11, "Smart Array 431", &smart4_access },
98};
99
100/* define the PCI info for the PCI cards this driver can control */
101static const struct pci_device_id cpqarray_pci_device_id[] =
102{
103 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
104 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
105 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
106 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
107 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
108 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
109 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
110 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
111 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
112 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
113 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
114 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
115 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
116 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
117 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
118 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
119 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
120 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
121 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
122 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
123 { 0 }
124};
125
126MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
127
128static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
129
130/* Debug... */
131#define DBG(s) do { s } while(0)
132/* Debug (general info)... */
133#define DBGINFO(s) do { } while(0)
134/* Debug Paranoid... */
135#define DBGP(s) do { } while(0)
136/* Debug Extra Paranoid... */
137#define DBGPX(s) do { } while(0)
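/* Note: only DBG() actually expands its argument; DBGINFO, DBGP and DBGPX are
 * compiled out here and can be enabled by giving them the same
 * "do { s } while(0)" body that DBG() uses. */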
138
139static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
140static void __iomem *remap_pci_mem(ulong base, ulong size);
141static int cpqarray_eisa_detect(void);
142static int pollcomplete(int ctlr);
143static void getgeometry(int ctlr);
144static void start_fwbk(int ctlr);
145
146static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
147static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
148
149static void free_hba(int i);
150static int alloc_cpqarray_hba(void);
151
152static int sendcmd(
153 __u8 cmd,
154 int ctlr,
155 void *buff,
156 size_t size,
157 unsigned int blk,
158 unsigned int blkcnt,
159 unsigned int log_unit );
160
 161static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
162static int ida_release(struct gendisk *disk, fmode_t mode);
163static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
 164static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
165static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
166
 167static void do_ida_request(struct request_queue *q);
168static void start_io(ctlr_info_t *h);
169
170static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
171static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
172static inline void complete_command(cmdlist_t *cmd, int timeout);
173
 174static irqreturn_t do_ida_intr(int irq, void *dev_id);
175static void ida_timer(unsigned long tdata);
176static int ida_revalidate(struct gendisk *disk);
177static int revalidate_allvol(ctlr_info_t *host);
178static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
179
180#ifdef CONFIG_PROC_FS
181static void ida_procinit(int i);
182#else
183static void ida_procinit(int i) {}
184#endif
185
186static inline drv_info_t *get_drv(struct gendisk *disk)
187{
188 return disk->private_data;
189}
190
191static inline ctlr_info_t *get_host(struct gendisk *disk)
192{
193 return disk->queue->queuedata;
194}
195
196
 197static const struct block_device_operations ida_fops = {
 198 .owner = THIS_MODULE,
 199 .open = ida_unlocked_open,
 200 .release = ida_release,
 201 .ioctl = ida_ioctl,
 202 .getgeo = ida_getgeo,
203 .revalidate_disk= ida_revalidate,
204};
205
206
207#ifdef CONFIG_PROC_FS
208
209static struct proc_dir_entry *proc_array;
 210static const struct file_operations ida_proc_fops;
211
212/*
213 * Get us a file in /proc/array that says something about each controller.
214 * Create /proc/array if it doesn't exist yet.
215 */
216static void __init ida_procinit(int i)
217{
218 if (proc_array == NULL) {
 219 proc_array = proc_mkdir("driver/cpqarray", NULL);
220 if (!proc_array) return;
221 }
222
 223 proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
224}
225
226/*
227 * Report information about this controller.
228 */
 229static int ida_proc_show(struct seq_file *m, void *v)
 230{
231 int i, ctlr;
232 ctlr_info_t *h = (ctlr_info_t*)m->private;
233 drv_info_t *drv;
234#ifdef CPQ_PROC_PRINT_QUEUES
235 cmdlist_t *c;
236 unsigned long flags;
237#endif
238
239 ctlr = h->ctlr;
 240 seq_printf(m, "%s: Compaq %s Controller\n"
241 " Board ID: 0x%08lx\n"
242 " Firmware Revision: %c%c%c%c\n"
243 " Controller Sig: 0x%08lx\n"
244 " Memory Address: 0x%08lx\n"
245 " I/O Port: 0x%04x\n"
246 " IRQ: %d\n"
247 " Logical drives: %d\n"
248 " Physical drives: %d\n\n"
249 " Current Q depth: %d\n"
250 " Max Q depth since init: %d\n\n",
251 h->devname,
252 h->product_name,
253 (unsigned long)h->board_id,
254 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
255 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
256 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
257 h->log_drives, h->phys_drives,
258 h->Qdepth, h->maxQsinceinit);
259
 260 seq_puts(m, "Logical Drive Info:\n");
261
262 for(i=0; i<h->log_drives; i++) {
263 drv = &h->drv[i];
 264 seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
 265 ctlr, i, drv->blk_size, drv->nr_blks);
266 }
267
268#ifdef CPQ_PROC_PRINT_QUEUES
269 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
 270 seq_puts(m, "\nCurrent Queues:\n");
271
272 c = h->reqQ;
 273 seq_printf(m, "reqQ = %p", c);
274 if (c) c=c->next;
275 while(c && c != h->reqQ) {
 276 seq_printf(m, "->%p", c);
277 c=c->next;
278 }
279
280 c = h->cmpQ;
 281 seq_printf(m, "\ncmpQ = %p", c);
282 if (c) c=c->next;
283 while(c && c != h->cmpQ) {
 284 seq_printf(m, "->%p", c);
285 c=c->next;
286 }
287
 288 seq_putc(m, '\n');
289 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
290#endif
 291 seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
 292 h->nr_allocs, h->nr_frees);
 293 return 0;
 294}
295
296static int ida_proc_open(struct inode *inode, struct file *file)
297{
298 return single_open(file, ida_proc_show, PDE(inode)->data);
299}
300
301static const struct file_operations ida_proc_fops = {
302 .owner = THIS_MODULE,
303 .open = ida_proc_open,
304 .read = seq_read,
305 .llseek = seq_lseek,
306 .release = single_release,
307};
308#endif /* CONFIG_PROC_FS */
309
310module_param_array(eisa, int, NULL, 0);
311
312static void release_io_mem(ctlr_info_t *c)
313{
314 /* if IO mem was not protected do nothing */
315 if( c->io_mem_addr == 0)
316 return;
317 release_region(c->io_mem_addr, c->io_mem_length);
318 c->io_mem_addr = 0;
319 c->io_mem_length = 0;
320}
321
322static void __devexit cpqarray_remove_one(int i)
323{
324 int j;
325 char buff[4];
326
 327 /* sendcmd will turn off interrupts and send the flush
 328 * to write all data in the battery-backed cache to the disks.
 329 * No data is returned, but we don't want to pass NULL to sendcmd. */
330 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
331 {
332 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
333 i);
334 }
335 free_irq(hba[i]->intr, hba[i]);
336 iounmap(hba[i]->vaddr);
337 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
338 del_timer(&hba[i]->timer);
339 remove_proc_entry(hba[i]->devname, proc_array);
340 pci_free_consistent(hba[i]->pci_dev,
341 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
342 hba[i]->cmd_pool_dhandle);
343 kfree(hba[i]->cmd_pool_bits);
344 for(j = 0; j < NWD; j++) {
345 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
346 del_gendisk(ida_gendisk[i][j]);
347 put_disk(ida_gendisk[i][j]);
348 }
349 blk_cleanup_queue(hba[i]->queue);
350 release_io_mem(hba[i]);
351 free_hba(i);
352}
353
354static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
355{
356 int i;
357 ctlr_info_t *tmp_ptr;
358
359 if (pci_get_drvdata(pdev) == NULL) {
360 printk( KERN_ERR "cpqarray: Unable to remove device \n");
361 return;
362 }
363
364 tmp_ptr = pci_get_drvdata(pdev);
365 i = tmp_ptr->ctlr;
366 if (hba[i] == NULL) {
 367 printk(KERN_ERR "cpqarray: controller %d appears to have"
 368 " already been removed\n", i);
369 return;
370 }
371 pci_set_drvdata(pdev, NULL);
372
373 cpqarray_remove_one(i);
374}
375
376/* removing an instance that was not removed automatically..
377 * must be an eisa card.
378 */
379static void __devexit cpqarray_remove_one_eisa (int i)
380{
381 if (hba[i] == NULL) {
 382 printk(KERN_ERR "cpqarray: controller %d appears to have"
 383 " already been removed\n", i);
384 return;
385 }
386 cpqarray_remove_one(i);
387}
388
389/* pdev is NULL for eisa */
 390static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 391{
 392 struct request_queue *q;
393 int j;
394
395 /*
396 * register block devices
397 * Find disks and fill in structs
398 * Get an interrupt, set the Q depth and get into /proc
399 */
400
 401 /* If this is successful it should ensure that we are the only */
402 /* instance of the driver */
403 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
404 goto Enomem4;
405 }
406 hba[i]->access.set_intr_mask(hba[i], 0);
407 if (request_irq(hba[i]->intr, do_ida_intr,
 408 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
409 {
410 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
411 hba[i]->intr, hba[i]->devname);
412 goto Enomem3;
413 }
414
415 for (j=0; j<NWD; j++) {
416 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
417 if (!ida_gendisk[i][j])
418 goto Enomem2;
419 }
420
 421 hba[i]->cmd_pool = pci_alloc_consistent(
422 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
423 &(hba[i]->cmd_pool_dhandle));
 424 hba[i]->cmd_pool_bits = kcalloc(
 425 DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
426 GFP_KERNEL);
427
428 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
429 goto Enomem1;
430
431 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
432 printk(KERN_INFO "cpqarray: Finding drives on %s",
433 hba[i]->devname);
434
435 spin_lock_init(&hba[i]->lock);
436 q = blk_init_queue(do_ida_request, &hba[i]->lock);
437 if (!q)
438 goto Enomem1;
439
440 hba[i]->queue = q;
441 q->queuedata = hba[i];
442
443 getgeometry(i);
444 start_fwbk(i);
445
446 ida_procinit(i);
447
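 /* EISA boards have no struct pci_dev, so only PCI controllers get a
 * bounce limit derived from their DMA mask. */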
448 if (pdev)
449 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
450
451 /* This is a hardware imposed limit. */
 452 blk_queue_max_segments(q, SG_MAX);
 453
454 init_timer(&hba[i]->timer);
455 hba[i]->timer.expires = jiffies + IDA_TIMER;
456 hba[i]->timer.data = (unsigned long)hba[i];
457 hba[i]->timer.function = ida_timer;
458 add_timer(&hba[i]->timer);
459
460 /* Enable IRQ now that spinlock and rate limit timer are set up */
461 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
462
463 for(j=0; j<NWD; j++) {
464 struct gendisk *disk = ida_gendisk[i][j];
465 drv_info_t *drv = &hba[i]->drv[j];
466 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
467 disk->major = COMPAQ_SMART2_MAJOR + i;
468 disk->first_minor = j<<NWD_SHIFT;
469 disk->fops = &ida_fops;
470 if (j && !drv->nr_blks)
471 continue;
 472 blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
473 set_capacity(disk, drv->nr_blks);
474 disk->queue = hba[i]->queue;
475 disk->private_data = drv;
476 add_disk(disk);
477 }
478
479 /* done ! */
480 return(i);
481
482Enomem1:
483 nr_ctlr = i;
484 kfree(hba[i]->cmd_pool_bits);
485 if (hba[i]->cmd_pool)
486 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
487 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
488Enomem2:
489 while (j--) {
490 put_disk(ida_gendisk[i][j]);
491 ida_gendisk[i][j] = NULL;
492 }
493 free_irq(hba[i]->intr, hba[i]);
494Enomem3:
495 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
496Enomem4:
497 if (pdev)
498 pci_set_drvdata(pdev, NULL);
499 release_io_mem(hba[i]);
500 free_hba(i);
501
502 printk( KERN_ERR "cpqarray: out of memory");
503
504 return -1;
505}
506
 507static int __devinit cpqarray_init_one( struct pci_dev *pdev,
508 const struct pci_device_id *ent)
509{
510 int i;
511
512 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
513 " bus %d dev %d func %d\n",
514 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
515 PCI_FUNC(pdev->devfn));
516 i = alloc_cpqarray_hba();
517 if( i < 0 )
518 return (-1);
519 memset(hba[i], 0, sizeof(ctlr_info_t));
520 sprintf(hba[i]->devname, "ida%d", i);
521 hba[i]->ctlr = i;
522 /* Initialize the pdev driver private data */
523 pci_set_drvdata(pdev, hba[i]);
524
525 if (cpqarray_pci_init(hba[i], pdev) != 0) {
526 pci_set_drvdata(pdev, NULL);
527 release_io_mem(hba[i]);
528 free_hba(i);
529 return -1;
530 }
531
532 return (cpqarray_register_ctlr(i, pdev));
533}
534
535static struct pci_driver cpqarray_pci_driver = {
536 .name = "cpqarray",
537 .probe = cpqarray_init_one,
538 .remove = __devexit_p(cpqarray_remove_one_pci),
539 .id_table = cpqarray_pci_device_id,
540};
541
542/*
543 * This is it. Find all the controllers and register them.
544 * returns the number of block devices registered.
545 */
546static int __init cpqarray_init(void)
547{
548 int num_cntlrs_reg = 0;
549 int i;
550 int rc = 0;
551
552 /* detect controllers */
553 printk(DRIVER_NAME "\n");
554
555 rc = pci_register_driver(&cpqarray_pci_driver);
556 if (rc)
557 return rc;
558 cpqarray_eisa_detect();
559
560 for (i=0; i < MAX_CTLR; i++) {
561 if (hba[i] != NULL)
562 num_cntlrs_reg++;
563 }
564
565 if (num_cntlrs_reg)
566 return 0;
567 else {
568 pci_unregister_driver(&cpqarray_pci_driver);
569 return -ENODEV;
570 }
571}
572
573/* Function to find the first free pointer into our hba[] array */
574/* Returns -1 if no free entries are left. */
575static int alloc_cpqarray_hba(void)
576{
577 int i;
578
579 for(i=0; i< MAX_CTLR; i++) {
580 if (hba[i] == NULL) {
581 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
582 if(hba[i]==NULL) {
583 printk(KERN_ERR "cpqarray: out of memory.\n");
584 return (-1);
585 }
586 return (i);
587 }
588 }
589 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
590 " of 8 controllers.\n");
591 return(-1);
592}
593
594static void free_hba(int i)
595{
596 kfree(hba[i]);
597 hba[i]=NULL;
598}
599
600/*
601 * Find the IO address of the controller, its IRQ and so forth. Fill
602 * in some basic stuff into the ctlr_info_t structure.
603 */
604static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
605{
606 ushort vendor_id, device_id, command;
607 unchar cache_line_size, latency_timer;
608 unchar irq, revision;
609 unsigned long addr[6];
610 __u32 board_id;
611
612 int i;
613
614 c->pci_dev = pdev;
 615 pci_set_master(pdev);
616 if (pci_enable_device(pdev)) {
617 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
618 return -1;
619 }
620 vendor_id = pdev->vendor;
621 device_id = pdev->device;
622 irq = pdev->irq;
623
624 for(i=0; i<6; i++)
625 addr[i] = pci_resource_start(pdev, i);
626
627 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
628 {
629 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
630 return -1;
631 }
632
633 pci_read_config_word(pdev, PCI_COMMAND, &command);
634 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
635 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
636 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
637
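 /* Config offset 0x2c holds the subsystem vendor and device IDs, which
 * together form the board_id matched against the products[] table. */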
638 pci_read_config_dword(pdev, 0x2c, &board_id);
639
640 /* check to see if controller has been disabled */
641 if(!(command & 0x02)) {
642 printk(KERN_WARNING
643 "cpqarray: controller appears to be disabled\n");
644 return(-1);
645 }
646
647DBGINFO(
648 printk("vendor_id = %x\n", vendor_id);
649 printk("device_id = %x\n", device_id);
650 printk("command = %x\n", command);
651 for(i=0; i<6; i++)
652 printk("addr[%d] = %lx\n", i, addr[i]);
653 printk("revision = %x\n", revision);
654 printk("irq = %x\n", irq);
655 printk("cache_line_size = %x\n", cache_line_size);
656 printk("latency_timer = %x\n", latency_timer);
657 printk("board_id = %x\n", board_id);
658);
659
660 c->intr = irq;
661
662 for(i=0; i<6; i++) {
663 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
664 { /* IO space */
665 c->io_mem_addr = addr[i];
666 c->io_mem_length = pci_resource_end(pdev, i)
667 - pci_resource_start(pdev, i) + 1;
668 if(!request_region( c->io_mem_addr, c->io_mem_length,
669 "cpqarray"))
670 {
671 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
672 c->io_mem_addr = 0;
673 c->io_mem_length = 0;
674 }
675 break;
676 }
677 }
678
679 c->paddr = 0;
680 for(i=0; i<6; i++)
681 if (!(pci_resource_flags(pdev, i) &
682 PCI_BASE_ADDRESS_SPACE_IO)) {
683 c->paddr = pci_resource_start (pdev, i);
684 break;
685 }
686 if (!c->paddr)
687 return -1;
688 c->vaddr = remap_pci_mem(c->paddr, 128);
689 if (!c->vaddr)
690 return -1;
691 c->board_id = board_id;
692
693 for(i=0; i<NR_PRODUCTS; i++) {
694 if (board_id == products[i].board_id) {
695 c->product_name = products[i].product_name;
696 c->access = *(products[i].access);
697 break;
698 }
699 }
700 if (i == NR_PRODUCTS) {
701 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
702 " to access the SMART Array controller %08lx\n",
703 (unsigned long)board_id);
704 return -1;
705 }
706
707 return 0;
708}
709
710/*
711 * Map (physical) PCI mem into (virtual) kernel space
712 */
713static void __iomem *remap_pci_mem(ulong base, ulong size)
714{
715 ulong page_base = ((ulong) base) & PAGE_MASK;
716 ulong page_offs = ((ulong) base) - page_base;
717 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
718
719 return (page_remapped ? (page_remapped + page_offs) : NULL);
720}
721
722#ifndef MODULE
723/*
724 * Config string is a comma separated set of i/o addresses of EISA cards.
725 */
726static int cpqarray_setup(char *str)
727{
728 int i, ints[9];
729
730 (void)get_options(str, ARRAY_SIZE(ints), ints);
731
732 for(i=0; i<ints[0] && i<8; i++)
733 eisa[i] = ints[i+1];
734 return 1;
735}
736
737__setup("smart2=", cpqarray_setup);
738
739#endif
740
741/*
742 * Find an EISA controller's signature. Set up an hba if we find it.
743 */
 744static int __devinit cpqarray_eisa_detect(void)
745{
746 int i=0, j;
747 __u32 board_id;
748 int intr;
749 int ctlr;
750 int num_ctlr = 0;
751
752 while(i<8 && eisa[i]) {
753 ctlr = alloc_cpqarray_hba();
754 if(ctlr == -1)
755 break;
756 board_id = inl(eisa[i]+0xC80);
757 for(j=0; j < NR_PRODUCTS; j++)
758 if (board_id == products[j].board_id)
759 break;
760
761 if (j == NR_PRODUCTS) {
762 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
763 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
764 continue;
765 }
766
767 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
768 hba[ctlr]->io_mem_addr = eisa[i];
769 hba[ctlr]->io_mem_length = 0x7FF;
770 if(!request_region(hba[ctlr]->io_mem_addr,
771 hba[ctlr]->io_mem_length,
772 "cpqarray"))
773 {
774 printk(KERN_WARNING "cpqarray: I/O range already in "
775 "use addr = %lx length = %ld\n",
776 hba[ctlr]->io_mem_addr,
777 hba[ctlr]->io_mem_length);
778 free_hba(ctlr);
779 continue;
780 }
781
782 /*
783 * Read the config register to find our interrupt
784 */
785 intr = inb(eisa[i]+0xCC0) >> 4;
786 if (intr & 1) intr = 11;
787 else if (intr & 2) intr = 10;
788 else if (intr & 4) intr = 14;
789 else if (intr & 8) intr = 15;
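 /* The nibble read above selects the interrupt line the board is
 * configured for: bit 0 = IRQ 11, bit 1 = IRQ 10, bit 2 = IRQ 14,
 * bit 3 = IRQ 15. */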
790
791 hba[ctlr]->intr = intr;
792 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
793 hba[ctlr]->product_name = products[j].product_name;
794 hba[ctlr]->access = *(products[j].access);
795 hba[ctlr]->ctlr = ctlr;
796 hba[ctlr]->board_id = board_id;
797 hba[ctlr]->pci_dev = NULL; /* not PCI */
798
799DBGINFO(
800 printk("i = %d, j = %d\n", i, j);
801 printk("irq = %x\n", intr);
802 printk("product name = %s\n", products[j].product_name);
803 printk("board_id = %x\n", board_id);
804);
805
806 num_ctlr++;
807 i++;
808
809 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
810 printk(KERN_WARNING
811 "cpqarray: Can't register EISA controller %d\n",
812 ctlr);
813
814 }
815
816 return num_ctlr;
817}
818
819/*
820 * Open. Make sure the device is really there.
821 */
 822static int ida_open(struct block_device *bdev, fmode_t mode)
 823{
824 drv_info_t *drv = get_drv(bdev->bd_disk);
825 ctlr_info_t *host = get_host(bdev->bd_disk);
 826
 827 DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
828 /*
829 * Root is allowed to open raw volume zero even if it's not configured
830 * so array config can still work. I don't think I really like this,
 831 * but I'm already using way too many device nodes to claim another one
832 * for "raw controller".
833 */
834 if (!drv->nr_blks) {
835 if (!capable(CAP_SYS_RAWIO))
836 return -ENXIO;
837 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
838 return -ENXIO;
839 }
840 host->usage_count++;
841 return 0;
842}
843
844static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
845{
846 int ret;
847
848 lock_kernel();
849 ret = ida_open(bdev, mode);
850 unlock_kernel();
851
852 return ret;
853}
854
855/*
856 * Close. Sync first.
857 */
 858static int ida_release(struct gendisk *disk, fmode_t mode)
 859{
860 ctlr_info_t *host;
861
862 lock_kernel();
863 host = get_host(disk);
 864 host->usage_count--;
865 unlock_kernel();
866
867 return 0;
868}
869
870/*
871 * Enqueuing and dequeuing functions for cmdlists.
872 */
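/* addQ() inserts c at the tail of the circular doubly-linked list headed by
 * *Qptr; removeQ() unlinks c and clears *Qptr when the list becomes empty.
 * Callers serialize access with IDA_LOCK or by running with interrupts off. */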
873static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
874{
875 if (*Qptr == NULL) {
876 *Qptr = c;
877 c->next = c->prev = c;
878 } else {
879 c->prev = (*Qptr)->prev;
880 c->next = (*Qptr);
881 (*Qptr)->prev->next = c;
882 (*Qptr)->prev = c;
883 }
884}
885
886static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
887{
888 if (c && c->next != c) {
889 if (*Qptr == c) *Qptr = c->next;
890 c->prev->next = c->next;
891 c->next->prev = c->prev;
892 } else {
893 *Qptr = NULL;
894 }
895 return c;
896}
897
898/*
899 * Get a request and submit it to the controller.
900 * This routine needs to grab all the requests it possibly can from the
901 * req Q and submit them. Interrupts are off (and need to be off) when you
902 * are in here (either via the dummy do_ida_request functions or by being
903 * called from the interrupt handler
904 */
 905static void do_ida_request(struct request_queue *q)
906{
907 ctlr_info_t *h = q->queuedata;
908 cmdlist_t *c;
909 struct request *creq;
910 struct scatterlist tmp_sg[SG_MAX];
911 int i, dir, seg;
912
913 if (blk_queue_plugged(q))
914 goto startio;
915
916queue_next:
 917 creq = blk_peek_request(q);
918 if (!creq)
919 goto startio;
920
 921 BUG_ON(creq->nr_phys_segments > SG_MAX);
922
923 if ((c = cmd_alloc(h,1)) == NULL)
924 goto startio;
925
 926 blk_start_request(creq);
927
928 c->ctlr = h->ctlr;
929 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
930 c->hdr.size = sizeof(rblk_t) >> 2;
931 c->size += sizeof(rblk_t);
932
 933 c->req.hdr.blk = blk_rq_pos(creq);
934 c->rq = creq;
935DBGPX(
936 printk("sector=%d, nr_sectors=%u\n",
937 blk_rq_pos(creq), blk_rq_sectors(creq));
 938);
 939 sg_init_table(tmp_sg, SG_MAX);
940 seg = blk_rq_map_sg(q, creq, tmp_sg);
941
942 /* Now do all the DMA Mappings */
943 if (rq_data_dir(creq) == READ)
944 dir = PCI_DMA_FROMDEVICE;
945 else
946 dir = PCI_DMA_TODEVICE;
947 for( i=0; i < seg; i++)
948 {
949 c->req.sg[i].size = tmp_sg[i].length;
950 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
 951 sg_page(&tmp_sg[i]),
952 tmp_sg[i].offset,
953 tmp_sg[i].length, dir);
954 }
 955DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
 956 c->req.hdr.sg_cnt = seg;
 957 c->req.hdr.blk_cnt = blk_rq_sectors(creq);
958 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
959 c->type = CMD_RWREQ;
960
961 /* Put the request on the tail of the request queue */
962 addQ(&h->reqQ, c);
963 h->Qdepth++;
964 if (h->Qdepth > h->maxQsinceinit)
965 h->maxQsinceinit = h->Qdepth;
966
967 goto queue_next;
968
969startio:
970 start_io(h);
971}
972
973/*
974 * start_io submits everything on a controller's request queue
975 * and moves it to the completion queue.
976 *
977 * Interrupts had better be off if you're in here
978 */
979static void start_io(ctlr_info_t *h)
980{
981 cmdlist_t *c;
982
983 while((c = h->reqQ) != NULL) {
984 /* Can't do anything if we're busy */
985 if (h->access.fifo_full(h) == 0)
986 return;
987
988 /* Get the first entry from the request Q */
989 removeQ(&h->reqQ, c);
990 h->Qdepth--;
991
992 /* Tell the controller to do our bidding */
993 h->access.submit_command(h, c);
994
995 /* Get onto the completion Q */
996 addQ(&h->cmpQ, c);
997 }
998}
999
1000/*
1001 * Mark all buffers that cmd was responsible for
1002 */
1003static inline void complete_command(cmdlist_t *cmd, int timeout)
1004{
 1005 struct request *rq = cmd->rq;
 1006 int error = 0;
1007 int i, ddir;
1008
1009 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1010 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1011 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1012 cmd->ctlr, cmd->hdr.unit);
1013 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1014 }
1015 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1016 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1017 cmd->ctlr, cmd->hdr.unit);
 1018 error = -EIO;
1019 }
1020 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1021 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1022 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1023 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1024 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
 1025 error = -EIO;
 1026 }
1027 if (timeout)
1028 error = -EIO;
1029 /* unmap the DMA mapping for all the scatter gather elements */
1030 if (cmd->req.hdr.cmd == IDA_READ)
1031 ddir = PCI_DMA_FROMDEVICE;
1032 else
1033 ddir = PCI_DMA_TODEVICE;
1034 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1035 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1036 cmd->req.sg[i].size, ddir);
1037
 1038 DBGPX(printk("Done with %p\n", rq););
 1039 __blk_end_request_all(rq, error);
1040}
1041
1042/*
1043 * The controller will interrupt us upon completion of commands.
1044 * Find the command on the completion queue, remove it, tell the OS and
1045 * try to queue up more IO
1046 */
 1047static irqreturn_t do_ida_intr(int irq, void *dev_id)
1048{
1049 ctlr_info_t *h = dev_id;
1050 cmdlist_t *c;
1051 unsigned long istat;
1052 unsigned long flags;
1053 __u32 a,a1;
1054
1055 istat = h->access.intr_pending(h);
1056 /* Is this interrupt for us? */
1057 if (istat == 0)
1058 return IRQ_NONE;
1059
1060 /*
1061 * If there are completed commands in the completion queue,
1062 * we had better do something about it.
1063 */
1064 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1065 if (istat & FIFO_NOT_EMPTY) {
1066 while((a = h->access.command_completed(h))) {
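 /* The low two bits of the completion tag carry error flags; mask
 * them off to recover the command's bus address, but keep the raw
 * value (a1) for the error check further down. */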
1067 a1 = a; a &= ~3;
1068 if ((c = h->cmpQ) == NULL)
1069 {
1070 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1071 continue;
1072 }
1073 while(c->busaddr != a) {
1074 c = c->next;
1075 if (c == h->cmpQ)
1076 break;
1077 }
1078 /*
1079 * If we've found the command, take it off the
1080 * completion Q and free it
1081 */
1082 if (c->busaddr == a) {
1083 removeQ(&h->cmpQ, c);
1084 /* Check for invalid command.
1085 * Controller returns command error,
1086 * But rcode = 0.
1087 */
1088
1089 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1090 {
1091 c->req.hdr.rcode = RCODE_INVREQ;
1092 }
1093 if (c->type == CMD_RWREQ) {
1094 complete_command(c, 0);
1095 cmd_free(h, c, 1);
1096 } else if (c->type == CMD_IOCTL_PEND) {
1097 c->type = CMD_IOCTL_DONE;
1098 }
1099 continue;
1100 }
1101 }
1102 }
1103
1104 /*
1105 * See if we can queue up some more IO
1106 */
1107 do_ida_request(h->queue);
1108 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1109 return IRQ_HANDLED;
1110}
1111
1112/*
1113 * This timer was for timing out requests that haven't happened after
1114 * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
1115 * reset a flags structure so we don't flood the user with
1116 * "Non-Fatal error" messages.
1117 */
1118static void ida_timer(unsigned long tdata)
1119{
1120 ctlr_info_t *h = (ctlr_info_t*)tdata;
1121
1122 h->timer.expires = jiffies + IDA_TIMER;
1123 add_timer(&h->timer);
1124 h->misc_tflags = 0;
1125}
1126
1127static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1128{
1129 drv_info_t *drv = get_drv(bdev->bd_disk);
1130
1131 if (drv->cylinders) {
1132 geo->heads = drv->heads;
1133 geo->sectors = drv->sectors;
1134 geo->cylinders = drv->cylinders;
1135 } else {
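 /* No geometry reported by the firmware: fall back to the usual
 * 255-head, 63-sector translation and derive cylinders from capacity. */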
1136 geo->heads = 0xff;
1137 geo->sectors = 0x3f;
1138 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1139 }
1140
1141 return 0;
1142}
1143
1144/*
1145 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1146 * setting readahead and submitting commands from userspace to the controller.
1147 */
 1148static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
 1149{
1150 drv_info_t *drv = get_drv(bdev->bd_disk);
1151 ctlr_info_t *host = get_host(bdev->bd_disk);
 1152 int error;
1153 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1154 ida_ioctl_t *my_io;
1155
1156 switch(cmd) {
1157 case IDAGETDRVINFO:
1158 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1159 return -EFAULT;
1160 return 0;
1161 case IDAPASSTHRU:
1162 if (!capable(CAP_SYS_RAWIO))
1163 return -EPERM;
1164 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1165 if (!my_io)
1166 return -ENOMEM;
1167 error = -EFAULT;
1168 if (copy_from_user(my_io, io, sizeof(*my_io)))
1169 goto out_passthru;
1170 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1171 if (error)
1172 goto out_passthru;
1173 error = -EFAULT;
1174 if (copy_to_user(io, my_io, sizeof(*my_io)))
1175 goto out_passthru;
1176 error = 0;
1177out_passthru:
1178 kfree(my_io);
1179 return error;
1180 case IDAGETCTLRSIG:
1181 if (!arg) return -EINVAL;
1182 if (put_user(host->ctlr_sig, (int __user *)arg))
1183 return -EFAULT;
1184 return 0;
1185 case IDAREVALIDATEVOLS:
 1186 if (MINOR(bdev->bd_dev) != 0)
1187 return -ENXIO;
1188 return revalidate_allvol(host);
1189 case IDADRIVERVERSION:
1190 if (!arg) return -EINVAL;
1191 if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
1192 return -EFAULT;
1193 return 0;
1194 case IDAGETPCIINFO:
1195 {
1196
1197 ida_pci_info_struct pciinfo;
1198
1199 if (!arg) return -EINVAL;
1200 pciinfo.bus = host->pci_dev->bus->number;
1201 pciinfo.dev_fn = host->pci_dev->devfn;
1202 pciinfo.board_id = host->board_id;
1203 if(copy_to_user((void __user *) arg, &pciinfo,
1204 sizeof( ida_pci_info_struct)))
1205 return -EFAULT;
1206 return(0);
1207 }
1208
1209 default:
1210 return -EINVAL;
1211 }
1212
1213}
1214
1215static int ida_ioctl(struct block_device *bdev, fmode_t mode,
1216 unsigned int cmd, unsigned long param)
1217{
1218 int ret;
1219
1220 lock_kernel();
1221 ret = ida_locked_ioctl(bdev, mode, cmd, param);
1222 unlock_kernel();
1223
1224 return ret;
1225}
1226
1227/*
1228 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1229 * The command block (io) has already been copied to kernel space for us,
1230 * however, any elements in the sglist need to be copied to kernel space
1231 * or copied back to userspace.
1232 *
1233 * Only root may perform a controller passthru command, however I'm not doing
1234 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1235 * putting a 64M buffer in the sglist is probably a *bad* idea.
1236 */
1237static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1238{
1239 int ctlr = h->ctlr;
1240 cmdlist_t *c;
1241 void *p = NULL;
1242 unsigned long flags;
1243 int error;
1244
1245 if ((c = cmd_alloc(h, 0)) == NULL)
1246 return -ENOMEM;
1247 c->ctlr = ctlr;
1248 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1249 c->hdr.size = sizeof(rblk_t) >> 2;
1250 c->size += sizeof(rblk_t);
1251
1252 c->req.hdr.cmd = io->cmd;
1253 c->req.hdr.blk = io->blk;
1254 c->req.hdr.blk_cnt = io->blk_cnt;
1255 c->type = CMD_IOCTL_PEND;
1256
1257 /* Pre submit processing */
1258 switch(io->cmd) {
1259 case PASSTHRU_A:
1260 p = memdup_user(io->sg[0].addr, io->sg[0].size);
1261 if (IS_ERR(p)) {
1262 error = PTR_ERR(p);
1263 cmd_free(h, c, 0);
1264 return error;
1265 }
1266 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1267 sizeof(ida_ioctl_t),
1268 PCI_DMA_BIDIRECTIONAL);
1269 c->req.sg[0].size = io->sg[0].size;
1270 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1271 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1272 c->req.hdr.sg_cnt = 1;
1273 break;
1274 case IDA_READ:
1275 case READ_FLASH_ROM:
1276 case SENSE_CONTROLLER_PERFORMANCE:
1277 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1278 if (!p)
1279 {
1280 error = -ENOMEM;
1281 cmd_free(h, c, 0);
1282 return(error);
1283 }
1284
1285 c->req.sg[0].size = io->sg[0].size;
1286 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1287 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1288 c->req.hdr.sg_cnt = 1;
1289 break;
1290 case IDA_WRITE:
1291 case IDA_WRITE_MEDIA:
1292 case DIAG_PASS_THRU:
1293 case COLLECT_BUFFER:
1294 case WRITE_FLASH_ROM:
1295 p = memdup_user(io->sg[0].addr, io->sg[0].size);
1296 if (IS_ERR(p)) {
1297 error = PTR_ERR(p);
1298 cmd_free(h, c, 0);
1299 return error;
 1300 }
1301 c->req.sg[0].size = io->sg[0].size;
1302 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1303 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1304 c->req.hdr.sg_cnt = 1;
1305 break;
1306 default:
1307 c->req.sg[0].size = sizeof(io->c);
1308 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1309 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1310 c->req.hdr.sg_cnt = 1;
1311 }
1312
1313 /* Put the request on the tail of the request queue */
1314 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1315 addQ(&h->reqQ, c);
1316 h->Qdepth++;
1317 start_io(h);
1318 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1319
1320 /* Wait for completion */
1321 while(c->type != CMD_IOCTL_DONE)
1322 schedule();
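 /* do_ida_intr() marks the command CMD_IOCTL_DONE when the controller
 * completes it, which terminates this polling loop. */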
1323
1324 /* Unmap the DMA */
1325 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1326 PCI_DMA_BIDIRECTIONAL);
1327 /* Post submit processing */
1328 switch(io->cmd) {
1329 case PASSTHRU_A:
1330 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1331 sizeof(ida_ioctl_t),
1332 PCI_DMA_BIDIRECTIONAL);
1333 case IDA_READ:
1334 case DIAG_PASS_THRU:
1335 case SENSE_CONTROLLER_PERFORMANCE:
1336 case READ_FLASH_ROM:
1337 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1338 kfree(p);
1339 return -EFAULT;
1340 }
1341 /* fall through and free p */
1342 case IDA_WRITE:
1343 case IDA_WRITE_MEDIA:
1344 case COLLECT_BUFFER:
1345 case WRITE_FLASH_ROM:
1346 kfree(p);
1347 break;
1348 default:;
1349 /* Nothing to do */
1350 }
1351
1352 io->rcode = c->req.hdr.rcode;
1353 cmd_free(h, c, 0);
1354 return(0);
1355}
1356
1357/*
1358 * Commands are pre-allocated in a large block. Here we use a simple bitmap
 1359 * scheme to suballocate them to the driver. Operations that are not time
 1360 * critical (and can wait for the allocation and possibly sleep) can pass
 1361 * get_from_pool == 0 to have a command allocated outside the pool.
1362 */
1363static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1364{
1365 cmdlist_t * c;
1366 int i;
1367 dma_addr_t cmd_dhandle;
1368
1369 if (!get_from_pool) {
1370 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1371 sizeof(cmdlist_t), &cmd_dhandle);
1372 if(c==NULL)
1373 return NULL;
1374 } else {
1375 do {
1376 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1377 if (i == NR_CMDS)
1378 return NULL;
1379 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
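 /* find_first_zero_bit() and the bit set are not one atomic step, so
 * retry until test_and_set_bit() actually claims a free slot. */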
1380 c = h->cmd_pool + i;
1381 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1382 h->nr_allocs++;
1383 }
1384
1385 memset(c, 0, sizeof(cmdlist_t));
1386 c->busaddr = cmd_dhandle;
1387 return c;
1388}
1389
1390static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1391{
1392 int i;
1393
1394 if (!got_from_pool) {
1395 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1396 c->busaddr);
1397 } else {
1398 i = c - h->cmd_pool;
1399 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1400 h->nr_frees++;
1401 }
1402}
1403
1404/***********************************************************************
1405 name: sendcmd
1406 Send a command to an IDA using the memory mapped FIFO interface
1407 and wait for it to complete.
1408 This routine should only be called at init time.
1409***********************************************************************/
1410static int sendcmd(
1411 __u8 cmd,
1412 int ctlr,
1413 void *buff,
1414 size_t size,
1415 unsigned int blk,
1416 unsigned int blkcnt,
1417 unsigned int log_unit )
1418{
1419 cmdlist_t *c;
1420 int complete;
1421 unsigned long temp;
1422 unsigned long i;
1423 ctlr_info_t *info_p = hba[ctlr];
1424
1425 c = cmd_alloc(info_p, 1);
1426 if(!c)
1427 return IO_ERROR;
1428 c->ctlr = ctlr;
1429 c->hdr.unit = log_unit;
1430 c->hdr.prio = 0;
1431 c->hdr.size = sizeof(rblk_t) >> 2;
1432 c->size += sizeof(rblk_t);
1433
1434 /* The request information. */
1435 c->req.hdr.next = 0;
1436 c->req.hdr.rcode = 0;
1437 c->req.bp = 0;
1438 c->req.hdr.sg_cnt = 1;
1439 c->req.hdr.reserved = 0;
1440
1441 if (size == 0)
1442 c->req.sg[0].size = 512;
1443 else
1444 c->req.sg[0].size = size;
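 /* Callers that pass size 0 (e.g. ID_CTLR, RESUME_BACKGROUND_ACTIVITY)
 * only need a scratch buffer, so default to one 512-byte sector. */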
1445
1446 c->req.hdr.blk = blk;
1447 c->req.hdr.blk_cnt = blkcnt;
1448 c->req.hdr.cmd = (unsigned char) cmd;
1449 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1450 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1451 /*
1452 * Disable interrupt
1453 */
1454 info_p->access.set_intr_mask(info_p, 0);
1455 /* Make sure there is room in the command FIFO */
1456 /* Actually it should be completely empty at this time. */
1457 for (i = 200000; i > 0; i--) {
1458 temp = info_p->access.fifo_full(info_p);
1459 if (temp != 0) {
1460 break;
1461 }
1462 udelay(10);
1463DBG(
1464 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1465 " waiting!\n", ctlr);
1466);
1467 }
1468 /*
1469 * Send the cmd
1470 */
1471 info_p->access.submit_command(info_p, c);
1472 complete = pollcomplete(ctlr);
1473
1474 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1475 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1476 if (complete != 1) {
1477 if (complete != c->busaddr) {
1478 printk( KERN_WARNING
1479 "cpqarray ida%d: idaSendPciCmd "
1480 "Invalid command list address returned! (%08lx)\n",
1481 ctlr, (unsigned long)complete);
1482 cmd_free(info_p, c, 1);
1483 return (IO_ERROR);
1484 }
1485 } else {
1486 printk( KERN_WARNING
1487 "cpqarray ida%d: idaSendPciCmd Timeout out, "
1488 "No command list address returned!\n",
1489 ctlr);
1490 cmd_free(info_p, c, 1);
1491 return (IO_ERROR);
1492 }
1493
1494 if (c->req.hdr.rcode & 0x00FE) {
1495 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1496 printk( KERN_WARNING
1497 "cpqarray ida%d: idaSendPciCmd, error: "
1498 "Controller failed at init time "
1499 "cmd: 0x%x, return code = 0x%x\n",
1500 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1501
1502 cmd_free(info_p, c, 1);
1503 return (IO_ERROR);
1504 }
1505 }
1506 cmd_free(info_p, c, 1);
1507 return (IO_OK);
1508}
1509
1510/*
1511 * revalidate_allvol is for online array config utilities. After a
1512 * utility reconfigures the drives in the array, it can use this function
1513 * (through an ioctl) to make the driver zap any previous disk structs for
1514 * that controller and get new ones.
1515 *
1516 * Right now I'm using the getgeometry() function to do this, but this
1517 * function should probably be finer grained and allow you to revalidate one
 1518 * particular logical volume (instead of all of them on a particular
1519 * controller).
1520 */
1521static int revalidate_allvol(ctlr_info_t *host)
1522{
1523 int ctlr = host->ctlr;
1524 int i;
1525 unsigned long flags;
1526
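 /* Refuse to revalidate while any other opener holds a volume; the
 * caller of the ioctl accounts for one usage_count reference. */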
1527 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1528 if (host->usage_count > 1) {
1529 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1530 printk(KERN_WARNING "cpqarray: Device busy for volume"
1531 " revalidation (usage=%d)\n", host->usage_count);
1532 return -EBUSY;
1533 }
1534 host->usage_count++;
1535 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1536
1537 /*
1538 * Set the partition and block size structures for all volumes
1539 * on this controller to zero. We will reread all of this data
1540 */
1541 set_capacity(ida_gendisk[ctlr][0], 0);
1542 for (i = 1; i < NWD; i++) {
1543 struct gendisk *disk = ida_gendisk[ctlr][i];
1544 if (disk->flags & GENHD_FL_UP)
1545 del_gendisk(disk);
1546 }
1547 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1548
1549 /*
1550 * Tell the array controller not to give us any interrupts while
1551 * we check the new geometry. Then turn interrupts back on when
1552 * we're done.
1553 */
1554 host->access.set_intr_mask(host, 0);
1555 getgeometry(ctlr);
1556 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1557
1558 for(i=0; i<NWD; i++) {
1559 struct gendisk *disk = ida_gendisk[ctlr][i];
1560 drv_info_t *drv = &host->drv[i];
1561 if (i && !drv->nr_blks)
1562 continue;
 1563 blk_queue_logical_block_size(host->queue, drv->blk_size);
1564 set_capacity(disk, drv->nr_blks);
1565 disk->queue = host->queue;
1566 disk->private_data = drv;
1567 if (i)
1568 add_disk(disk);
1569 }
1570
1571 host->usage_count--;
1572 return 0;
1573}
1574
1575static int ida_revalidate(struct gendisk *disk)
1576{
1577 drv_info_t *drv = disk->private_data;
1578 set_capacity(disk, drv->nr_blks);
1579 return 0;
1580}
1581
1582/********************************************************************
1583 name: pollcomplete
1584 Wait polling for a command to complete.
1585 The memory mapped FIFO is polled for the completion.
1586 Used only at init time, interrupts disabled.
1587 ********************************************************************/
1588static int pollcomplete(int ctlr)
1589{
1590 int done;
1591 int i;
1592
1593 /* Wait (up to 2 seconds) for a command to complete */
1594
1595 for (i = 200000; i > 0; i--) {
1596 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1597 if (done == 0) {
1598 udelay(10); /* a short fixed delay */
1599 } else
1600 return (done);
1601 }
1602 /* Invalid address to tell caller we ran out of time */
1603 return 1;
1604}
1605/*****************************************************************
1606 start_fwbk
 1607 Starts the controller firmware's background processing.
1608 Currently only the Integrated Raid controller needs this done.
1609 If the PCI mem address registers are written to after this,
1610 data corruption may occur
1611*****************************************************************/
1612static void start_fwbk(int ctlr)
1613{
1614 id_ctlr_t *id_ctlr_buf;
1615 int ret_code;
1616
1617 if( (hba[ctlr]->board_id != 0x40400E11)
1618 && (hba[ctlr]->board_id != 0x40480E11) )
1619
 1620 /* Not an Integrated Raid, so there is nothing for us to do */
1621 return;
1622 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1623 " processing\n");
1624 /* Command does not return anything, but idasend command needs a
1625 buffer */
 1626 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1627 if(id_ctlr_buf==NULL)
1628 {
1629 printk(KERN_WARNING "cpqarray: Out of memory. "
1630 "Unable to start background processing.\n");
1631 return;
1632 }
1633 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1634 id_ctlr_buf, 0, 0, 0, 0);
1635 if(ret_code != IO_OK)
1636 printk(KERN_WARNING "cpqarray: Unable to start"
1637 " background processing\n");
1638
1639 kfree(id_ctlr_buf);
1640}
1641/*****************************************************************
1642 getgeometry
1643 Get ida logical volume geometry from the controller
 1644 This is a large bit of code which once existed in two flavors.
1645 It is used only at init time.
1646*****************************************************************/
1647static void getgeometry(int ctlr)
1648{
1649 id_log_drv_t *id_ldrive;
1650 id_ctlr_t *id_ctlr_buf;
1651 sense_log_drv_stat_t *id_lstatus_buf;
1652 config_t *sense_config_buf;
1653 unsigned int log_unit, log_index;
1654 int ret_code, size;
1655 drv_info_t *drv;
1656 ctlr_info_t *info_p = hba[ctlr];
1657 int i;
1658
1659 info_p->log_drv_map = 0;
1660
1661 id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1662 if (!id_ldrive) {
 1663 printk( KERN_ERR "cpqarray: out of memory.\n");
 1664 goto err_0;
1665 }
1666
1667 id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1668 if (!id_ctlr_buf) {
 1669 printk( KERN_ERR "cpqarray: out of memory.\n");
 1670 goto err_1;
1671 }
1672
1673 id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1674 if (!id_lstatus_buf) {
 1675 printk( KERN_ERR "cpqarray: out of memory.\n");
 1676 goto err_2;
1677 }
1678
1679 sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
1680 if (!sense_config_buf) {
 1681 printk( KERN_ERR "cpqarray: out of memory.\n");
 1682 goto err_3;
1683 }
1684
1685 info_p->phys_drives = 0;
1686 info_p->log_drv_map = 0;
1687 info_p->drv_assign_map = 0;
1688 info_p->drv_spare_map = 0;
1689 info_p->mp_failed_drv_map = 0; /* only initialized here */
1690 /* Get controllers info for this logical drive */
1691 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1692 if (ret_code == IO_ERROR) {
1693 /*
1694 * If can't get controller info, set the logical drive map to 0,
1695 * so the idastubopen will fail on all logical drives
1696 * on the controller.
1697 */
 1698 printk(KERN_ERR "cpqarray: error sending ID controller\n");
 1699 goto err_4;
1700 }
1701
1702 info_p->log_drives = id_ctlr_buf->nr_drvs;
1703 for(i=0;i<4;i++)
1704 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1705 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1706
1707 printk(" (%s)\n", info_p->product_name);
1708 /*
1709 * Initialize logical drive map to zero
1710 */
1711 log_index = 0;
1712 /*
1713 * Get drive geometry for all logical drives
1714 */
1715 if (id_ctlr_buf->nr_drvs > 16)
1716 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1717 "16 logical drives per controller.\n. "
1718 " Additional drives will not be "
1719 "detected\n", ctlr);
1720
1721 for (log_unit = 0;
1722 (log_index < id_ctlr_buf->nr_drvs)
1723 && (log_unit < NWD);
1724 log_unit++) {
1725 size = sizeof(sense_log_drv_stat_t);
1726
1727 /*
1728 Send "Identify logical drive status" cmd
1729 */
1730 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1731 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1732 if (ret_code == IO_ERROR) {
1733 /*
1734 If can't get logical drive status, set
1735 the logical drive map to 0, so the
1736 idastubopen will fail for all logical drives
1737 on the controller.
1738 */
1739 info_p->log_drv_map = 0;
1740 printk( KERN_WARNING
1741 "cpqarray ida%d: idaGetGeometry - Controller"
1742 " failed to report status of logical drive %d\n"
1743 "Access to this controller has been disabled\n",
1744 ctlr, log_unit);
 1745 goto err_4;
1746 }
1747 /*
1748 Make sure the logical drive is configured
1749 */
1750 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1751 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1752 sizeof(id_log_drv_t), 0, 0, log_unit);
1753 /*
1754 If error, the bit for this
1755 logical drive won't be set and
1756 idastubopen will return error.
1757 */
1758 if (ret_code != IO_ERROR) {
1759 drv = &info_p->drv[log_unit];
1760 drv->blk_size = id_ldrive->blk_size;
1761 drv->nr_blks = id_ldrive->nr_blks;
1762 drv->cylinders = id_ldrive->drv.cyl;
1763 drv->heads = id_ldrive->drv.heads;
1764 drv->sectors = id_ldrive->drv.sect_per_track;
1765 info_p->log_drv_map |= (1 << log_unit);
1766
1767 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1768 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1769 ret_code = sendcmd(SENSE_CONFIG,
1770 ctlr, sense_config_buf,
1771 sizeof(config_t), 0, 0, log_unit);
1772 if (ret_code == IO_ERROR) {
1773 info_p->log_drv_map = 0;
 1774 printk(KERN_ERR "cpqarray: error sending sense config\n");
 1775 goto err_4;
1776 }
1777
1778 info_p->phys_drives =
1779 sense_config_buf->ctlr_phys_drv;
1780 info_p->drv_assign_map
1781 |= sense_config_buf->drv_asgn_map;
1782 info_p->drv_assign_map
1783 |= sense_config_buf->spare_asgn_map;
1784 info_p->drv_spare_map
1785 |= sense_config_buf->spare_asgn_map;
1786 } /* end of if no error on id_ldrive */
1787 log_index = log_index + 1;
1788 } /* end of if logical drive configured */
1789 } /* end of for log_unit */
1790
1791 /* Free all the buffers and return */
1792err_4:
 1793 kfree(sense_config_buf);
 1794err_3:
 1795 kfree(id_lstatus_buf);
 1796err_2:
 1797 kfree(id_ctlr_buf);
1798err_1:
1799 kfree(id_ldrive);
1800err_0:
 1801 return;
1802}
1803
1804static void __exit cpqarray_exit(void)
1805{
1806 int i;
1807
1808 pci_unregister_driver(&cpqarray_pci_driver);
1809
1810 /* Double check that all controller entries have been removed */
1811 for(i=0; i<MAX_CTLR; i++) {
1812 if (hba[i] != NULL) {
1813 printk(KERN_WARNING "cpqarray: Removing EISA "
1814 "controller %d\n", i);
1815 cpqarray_remove_one_eisa(i);
1816 }
1817 }
1818
 1819 remove_proc_entry("driver/cpqarray", NULL);
1820}
1821
1822module_init(cpqarray_init)
1823module_exit(cpqarray_exit)