1 /*
2 *
3 * Linux MegaRAID device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_mm.c
13 * Version : v2.20.2.5 (Jan 21 2005)
14 *
15 * Common management module
16 */
17
18 #include "megaraid_mm.h"
19 #include <linux/smp_lock.h>
20
21
22 // Entry points for char node driver
23 static int mraid_mm_open(struct inode *, struct file *);
24 static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long);
25
26
27 // routines to convert to and from the old (mimd) ioctl format
28 static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
29 static int kioc_to_mimd(uioc_t *, mimd_t __user *);
30
31
32 // Helper functions
33 static int handle_drvrcmd(void __user *, uint8_t, int *);
34 static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
35 static void ioctl_done(uioc_t *);
36 static void lld_timedout(unsigned long);
37 static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
38 static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
39 static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
40 static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
41 static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
42 static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
43 static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
44 static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
45
46 #ifdef CONFIG_COMPAT
47 static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
48 #endif
49
50 MODULE_AUTHOR("LSI Logic Corporation");
51 MODULE_DESCRIPTION("LSI Logic Management Module");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(LSI_COMMON_MOD_VERSION);
54
55 static int dbglevel = CL_ANN;
56 module_param_named(dlevel, dbglevel, int, 0);
57 MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
58
59 EXPORT_SYMBOL(mraid_mm_register_adp);
60 EXPORT_SYMBOL(mraid_mm_unregister_adp);
61 EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
62
63 static int majorno;
64 static uint32_t drvr_ver = 0x02200201;
65
66 static int adapters_count_g;
67 static struct list_head adapters_list_g;
68
69 static wait_queue_head_t wait_q;
70
71 static struct file_operations lsi_fops = {
72 .open = mraid_mm_open,
73 .ioctl = mraid_mm_ioctl,
74 #ifdef CONFIG_COMPAT
75 .compat_ioctl = mraid_mm_compat_ioctl,
76 #endif
77 .owner = THIS_MODULE,
78 };
79
80 /**
81 * mraid_mm_open - open routine for char node interface
82 * @inode : unused
83 * @filep : unused
84 *
85 * Allow ioctl operations by apps only if they have superuser privilege.
86 */
87 static int
88 mraid_mm_open(struct inode *inode, struct file *filep)
89 {
90 /*
91 * Only allow superuser to access private ioctl interface
92 */
93 if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
94
95 return 0;
96 }
97
98 /**
99 * mraid_mm_ioctl - module entry-point for ioctls
100 * @inode : inode (ignored)
101 * @filep : file pointer (ignored)
102 * @cmd : ioctl command
103 * @arg : user ioctl packet
104 */
105 static int
106 mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
107 unsigned long arg)
108 {
109 uioc_t *kioc;
110 char signature[EXT_IOCTL_SIGN_SZ] = {0};
111 int rval;
112 mraid_mmadp_t *adp;
113 uint8_t old_ioctl;
114 int drvrcmd_rval;
115 void __user *argp = (void __user *)arg;
116
117 /*
118 * Make sure only USCSICMD ioctls are issued through this interface.
119 * MIMD applications may still fire a different command.
120 */
121
122 if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
123 return (-EINVAL);
124 }
125
126 /*
127 * Look for signature to see if this is the new or old ioctl format.
128 */
129 if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
130 con_log(CL_ANN, (KERN_WARNING
131 "megaraid cmm: copy from usr addr failed\n"));
132 return (-EFAULT);
133 }
134
135 if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
136 old_ioctl = 0;
137 else
138 old_ioctl = 1;
139
140 /*
141 * At present, we don't support the new ioctl packet
142 */
143 if (!old_ioctl )
144 return (-EINVAL);
145
146 /*
147 * If it is a driver ioctl (as opposed to fw ioctls), then we can
148 * handle the command locally. rval > 0 means it is not a drvr cmd
149 */
150 rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
151
152 if (rval < 0)
153 return rval;
154 else if (rval == 0)
155 return drvrcmd_rval;
156
157 rval = 0;
158 if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
159 return rval;
160 }
161
162 /*
163 * Check if adapter can accept ioctl. We may have marked it offline
164 * if any previous kioc had timedout on this controller.
165 */
166 if (!adp->quiescent) {
167 con_log(CL_ANN, (KERN_WARNING
168 "megaraid cmm: controller cannot accept cmds due to "
169 "earlier errors\n" ));
170 return -EFAULT;
171 }
172
173 /*
174 * The following call will block till a kioc is available
175 */
176 kioc = mraid_mm_alloc_kioc(adp);
177
178 /*
179 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
180 */
181 if ((rval = mimd_to_kioc(argp, adp, kioc))) {
182 mraid_mm_dealloc_kioc(adp, kioc);
183 return rval;
184 }
185
186 kioc->done = ioctl_done;
187
188 /*
189 * Issue the IOCTL to the low level driver. After the IOCTL completes
190 * release the kioc if and only if it was _not_ timed out. If it was
191 * timed out, the resources are still with the low level driver.
192 */
193 if ((rval = lld_ioctl(adp, kioc))) {
194
195 if (!kioc->timedout)
196 mraid_mm_dealloc_kioc(adp, kioc);
197
198 return rval;
199 }
200
201 /*
202 * Convert the kioc back to user space
203 */
204 rval = kioc_to_mimd(kioc, argp);
205
206 /*
207 * Return the kioc to free pool
208 */
209 mraid_mm_dealloc_kioc(adp, kioc);
210
211 return rval;
212 }
213
214
215 /**
216 * mraid_mm_get_adapter - Returns the adapter corresponding to the mimd packet
217 * @umimd : User space mimd_t ioctl packet
218 * @rval : error code returned to the caller (OUT)
219 */
220 static mraid_mmadp_t *
221 mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
222 {
223 mraid_mmadp_t *adapter;
224 mimd_t mimd;
225 uint32_t adapno;
226 int iterator;
227
228
229 if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
230 *rval = -EFAULT;
231 return NULL;
232 }
233
234 adapno = GETADAP(mimd.ui.fcs.adapno);
235
236 if (adapno >= adapters_count_g) {
237 *rval = -ENODEV;
238 return NULL;
239 }
240
241 adapter = NULL;
242 iterator = 0;
243
244 list_for_each_entry(adapter, &adapters_list_g, list) {
245 if (iterator++ == adapno) break;
246 }
247
248 if (!adapter) {
249 *rval = -ENODEV;
250 return NULL;
251 }
252
253 return adapter;
254 }
255
256 /*
257 * handle_drvrcmd - This routine checks if the opcode is a driver
258 * cmd and if it is, handles it.
259 * @arg : packet sent by the user app
260 * @old_ioctl : mimd if 1; uioc otherwise
261 */
262 static int
263 handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
264 {
265 mimd_t __user *umimd;
266 mimd_t kmimd;
267 uint8_t opcode;
268 uint8_t subopcode;
269
270 if (old_ioctl)
271 goto old_packet;
272 else
273 goto new_packet;
274
275 new_packet:
276 return (-ENOTSUPP);
277
278 old_packet:
279 *rval = 0;
280 umimd = arg;
281
282 if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
283 return (-EFAULT);
284
285 opcode = kmimd.ui.fcs.opcode;
286 subopcode = kmimd.ui.fcs.subopcode;
287
288 /*
289 * If the opcode is 0x82 and the subopcode is either MEGAIOC_QDRVRVER or
290 * MEGAIOC_QNADAP, we can handle it here. Otherwise we return 1 to
291 * indicate that we cannot handle this command.
292 */
293 if (opcode != 0x82)
294 return 1;
295
296 switch (subopcode) {
297
298 case MEGAIOC_QDRVRVER:
299
300 if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
301 return (-EFAULT);
302
303 return 0;
304
305 case MEGAIOC_QNADAP:
306
307 *rval = adapters_count_g;
308
309 if (copy_to_user(kmimd.data, &adapters_count_g,
310 sizeof(uint32_t)))
311 return (-EFAULT);
312
313 return 0;
314
315 default:
316 /* cannot handle */
317 return 1;
318 }
319
320 return 0;
321 }
322
323
324 /**
325 * mimd_to_kioc - Converter from old to new ioctl format
326 *
327 * @umimd : user space old MIMD IOCTL
328 * @kioc : kernel space new format IOCTL
329 *
330 * Routine to convert a MIMD interface ioctl into the new format. The new
331 * packet is built in kernel space so that the driver can operate on it
332 * freely.
333 */
334
335 static int
336 mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
337 {
338 mbox64_t *mbox64;
339 mbox_t *mbox;
340 mraid_passthru_t *pthru32;
341 uint32_t adapno;
342 uint8_t opcode;
343 uint8_t subopcode;
344 mimd_t mimd;
345
346 if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
347 return (-EFAULT);
348
349 /*
350 * Applications are not allowed to send extd pthru
351 */
352 if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
353 (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
354 return (-EINVAL);
355
356 opcode = mimd.ui.fcs.opcode;
357 subopcode = mimd.ui.fcs.subopcode;
358 adapno = GETADAP(mimd.ui.fcs.adapno);
359
360 if (adapno >= adapters_count_g)
361 return (-ENODEV);
362
363 kioc->adapno = adapno;
364 kioc->mb_type = MBOX_LEGACY;
365 kioc->app_type = APPTYPE_MIMD;
366
367 switch (opcode) {
368
369 case 0x82:
370
371 if (subopcode == MEGAIOC_QADAPINFO) {
372
373 kioc->opcode = GET_ADAP_INFO;
374 kioc->data_dir = UIOC_RD;
375 kioc->xferlen = sizeof(mraid_hba_info_t);
376
377 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
378 return (-ENOMEM);
379 }
380 else {
381 con_log(CL_ANN, (KERN_WARNING
382 "megaraid cmm: Invalid subop\n"));
383 return (-EINVAL);
384 }
385
386 break;
387
388 case 0x81:
389
390 kioc->opcode = MBOX_CMD;
391 kioc->xferlen = mimd.ui.fcs.length;
392 kioc->user_data_len = kioc->xferlen;
393 kioc->user_data = mimd.ui.fcs.buffer;
394
395 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
396 return (-ENOMEM);
397
398 if (mimd.outlen) kioc->data_dir = UIOC_RD;
399 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
400
401 break;
402
403 case 0x80:
404
405 kioc->opcode = MBOX_CMD;
406 kioc->xferlen = (mimd.outlen > mimd.inlen) ?
407 mimd.outlen : mimd.inlen;
408 kioc->user_data_len = kioc->xferlen;
409 kioc->user_data = mimd.data;
410
411 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
412 return (-ENOMEM);
413
414 if (mimd.outlen) kioc->data_dir = UIOC_RD;
415 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
416
417 break;
418
419 default:
420 return (-EINVAL);
421 }
422
423 /*
424 * If driver command, nothing else to do
425 */
426 if (opcode == 0x82)
427 return 0;
428
429 /*
430 * This is a mailbox cmd; copy the mailbox from mimd
431 */
432 mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
433 mbox = &mbox64->mbox32;
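	/*
	 * Only the first 14 bytes of the mimd mailbox carry command fields;
	 * the status byte is copied back to mbox[17] in kioc_to_mimd().
	 */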
434 memcpy(mbox, mimd.mbox, 14);
435
436 if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD
437
438 mbox->xferaddr = (uint32_t)kioc->buf_paddr;
439
440 if (kioc->data_dir & UIOC_WR) {
441 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
442 kioc->xferlen)) {
443 return (-EFAULT);
444 }
445 }
446
447 return 0;
448 }
449
450 /*
451 * This is a regular 32-bit pthru cmd; mbox points to the passthru
452 * structure, which lives in its own DMA-able buffer (kioc->pthru32)
453 * whose dma address goes into the mailbox xferaddr. The data buffer
454 * attached to this kioc is referenced from pthru32->dataxferaddr.
455 */
456 pthru32 = kioc->pthru32;
457 kioc->user_pthru = &umimd->pthru;
458 mbox->xferaddr = (uint32_t)kioc->pthru32_h;
459
460 if (copy_from_user(pthru32, kioc->user_pthru,
461 sizeof(mraid_passthru_t))) {
462 return (-EFAULT);
463 }
464
465 pthru32->dataxferaddr = kioc->buf_paddr;
466 if (kioc->data_dir & UIOC_WR) {
467 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
468 pthru32->dataxferlen)) {
469 return (-EFAULT);
470 }
471 }
472
473 return 0;
474 }
475
476 /**
477 * mraid_mm_attach_buf - Attach a free dma buffer for required size
478 *
479 * @adp : Adapter softstate
480 * @kioc : kioc that the buffer needs to be attached to
481 * @xferlen : required length for buffer
482 *
483 * First we search for the pool with the smallest buffer that is >= @xferlen.
484 * If that pool has no free buffer, we try the next bigger size. If none is
485 * available, we allocate a new buffer of the smallest size that is >=
486 * @xferlen and attach it to the kioc.
487 */
488 static int
489 mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
490 {
491 mm_dmapool_t *pool;
492 int right_pool = -1;
493 unsigned long flags;
494 int i;
495
496 kioc->pool_index = -1;
497 kioc->buf_vaddr = NULL;
498 kioc->buf_paddr = 0;
499 kioc->free_buf = 0;
500
501 /*
502 * We need xferlen amount of memory. See if we can get it from our
503 * dma pools. If we don't get exact size, we will try bigger buffer
504 */
505
506 for (i = 0; i < MAX_DMA_POOLS; i++) {
507
508 pool = &adp->dma_pool_list[i];
509
510 if (xferlen > pool->buf_size)
511 continue;
512
513 if (right_pool == -1)
514 right_pool = i;
515
516 spin_lock_irqsave(&pool->lock, flags);
517
518 if (!pool->in_use) {
519
520 pool->in_use = 1;
521 kioc->pool_index = i;
522 kioc->buf_vaddr = pool->vaddr;
523 kioc->buf_paddr = pool->paddr;
524
525 spin_unlock_irqrestore(&pool->lock, flags);
526 return 0;
527 }
528 else {
529 spin_unlock_irqrestore(&pool->lock, flags);
530 continue;
531 }
532 }
533
534 /*
535 * If xferlen doesn't match any of our pools, return error
536 */
537 if (right_pool == -1)
538 return -EINVAL;
539
540 /*
541 * We did not get any buffer from the preallocated pool. Let us try
542 * to allocate one new buffer. NOTE: This is a blocking call.
543 */
544 pool = &adp->dma_pool_list[right_pool];
545
546 spin_lock_irqsave(&pool->lock, flags);
547
548 kioc->pool_index = right_pool;
549 kioc->free_buf = 1;
550 kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
551 &kioc->buf_paddr);
552 spin_unlock_irqrestore(&pool->lock, flags);
553
554 if (!kioc->buf_vaddr)
555 return -ENOMEM;
556
557 return 0;
558 }
559
560 /**
561 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
562 * @adp : Adapter softstate for this module
563 *
564 * The kioc_semaphore is initialized with number of kioc nodes in the
565 * free kioc pool. If the kioc pool is empty, this function blocks till
566 * a kioc becomes free.
567 */
568 static uioc_t *
569 mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
570 {
571 uioc_t *kioc;
572 struct list_head* head;
573 unsigned long flags;
574
575 down(&adp->kioc_semaphore);
576
577 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
578
579 head = &adp->kioc_pool;
580
581 if (list_empty(head)) {
582 up(&adp->kioc_semaphore);
583 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
584
585 con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
586 return NULL;
587 }
588
589 kioc = list_entry(head->next, uioc_t, list);
590 list_del_init(&kioc->list);
591
592 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
593
594 memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
595 memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
596
597 kioc->buf_vaddr = NULL;
598 kioc->buf_paddr = 0;
599 	kioc->pool_index = -1;
600 kioc->free_buf = 0;
601 kioc->user_data = NULL;
602 kioc->user_data_len = 0;
603 kioc->user_pthru = NULL;
604 kioc->timedout = 0;
605
606 return kioc;
607 }
608
609 /**
610 * mraid_mm_dealloc_kioc - Return kioc to free pool
611 *
612 * @adp : Adapter softstate
613 * @kioc : uioc_t node to be returned to free pool
614 */
615 static void
616 mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
617 {
618 mm_dmapool_t *pool;
619 unsigned long flags;
620
621 if (kioc->pool_index != -1) {
622 pool = &adp->dma_pool_list[kioc->pool_index];
623
624 /* This routine may be called in non-isr context also */
625 spin_lock_irqsave(&pool->lock, flags);
626
627 /*
628 * While attaching the dma buffer, if we didn't get the
629 * required buffer from the pool, we would have allocated
630 * it at run time and set the free_buf flag. We must
631 * free that buffer. Otherwise, just mark that the buffer is
632 * not in use
633 */
634 if (kioc->free_buf == 1)
635 pci_pool_free(pool->handle, kioc->buf_vaddr,
636 kioc->buf_paddr);
637 else
638 pool->in_use = 0;
639
640 spin_unlock_irqrestore(&pool->lock, flags);
641 }
642
643 /* Return the kioc to the free pool */
644 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
645 list_add(&kioc->list, &adp->kioc_pool);
646 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
647
648 /* increment the free kioc count */
649 up(&adp->kioc_semaphore);
650
651 return;
652 }
653
654 /**
655 * lld_ioctl - Routine to issue ioctl to low level drvr
656 *
657 * @adp : The adapter handle
658 * @kioc : The ioctl packet with kernel addresses
659 */
660 static int
661 lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
662 {
663 int rval;
664 struct timer_list timer;
665 struct timer_list *tp = NULL;
666
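	/*
	 * -ENODATA marks the kioc as pending; the completion path
	 * (ioctl_done) or the timeout handler (lld_timedout) sets a final
	 * status and wakes up wait_q when the command finishes.
	 */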
667 kioc->status = -ENODATA;
668 rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
669
670 if (rval) return rval;
671
672 /*
673 * Start the timer
674 */
675 if (adp->timeout > 0) {
676 tp = &timer;
677 init_timer(tp);
678
679 tp->function = lld_timedout;
680 tp->data = (unsigned long)kioc;
681 tp->expires = jiffies + adp->timeout * HZ;
682
683 add_timer(tp);
684 }
685
686 /*
687 * Wait till the low level driver completes the ioctl. After this
688 * call, the ioctl either completed successfully or timedout.
689 */
690 wait_event(wait_q, (kioc->status != -ENODATA));
691 if (tp) {
692 del_timer_sync(tp);
693 }
694
695 /*
696 * If the command had timedout, we mark the controller offline
697 * before returning
698 */
699 if (kioc->timedout) {
700 adp->quiescent = 0;
701 }
702
703 return kioc->status;
704 }
705
706
707 /**
708 * ioctl_done - callback from the low level driver
709 *
710 * @kioc : completed ioctl packet
711 */
712 static void
713 ioctl_done(uioc_t *kioc)
714 {
715 uint32_t adapno;
716 int iterator;
717 mraid_mmadp_t* adapter;
718
719 /*
720 * When the kioc returns from the low level driver, make sure its status
721 * is no longer -ENODATA. Otherwise, the waiter would hang on wait_event
722 * forever.
723 */
724 if (kioc->status == -ENODATA) {
725 con_log(CL_ANN, (KERN_WARNING
726 "megaraid cmm: lld didn't change status!\n"));
727
728 kioc->status = -EINVAL;
729 }
730
731 /*
732 * Check if this kioc was timedout before. If so, nobody is waiting
733 * on this kioc. We don't have to wake up anybody. Instead, we just
734 * have to free the kioc
735 */
736 if (kioc->timedout) {
737 iterator = 0;
738 adapter = NULL;
739 adapno = kioc->adapno;
740
741 con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
742 "ioctl that was timedout before\n"));
743
744 list_for_each_entry(adapter, &adapters_list_g, list) {
745 if (iterator++ == adapno) break;
746 }
747
748 kioc->timedout = 0;
749
750 if (adapter) {
751 mraid_mm_dealloc_kioc( adapter, kioc );
752 }
753 }
754 else {
755 wake_up(&wait_q);
756 }
757 }
758
759
760 /*
761 * lld_timedout : callback from the expired timer
762 *
763 * @ptr : ioctl packet that timed out
764 */
765 static void
766 lld_timedout(unsigned long ptr)
767 {
768 uioc_t *kioc = (uioc_t *)ptr;
769
770 kioc->status = -ETIME;
771 kioc->timedout = 1;
772
773 con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
774
775 wake_up(&wait_q);
776 }
777
778
779 /**
780 * kioc_to_mimd : Converter from new back to old format
781 *
782 * @kioc : Kernel space IOCTL packet (successfully issued)
783 * @mimd : User space MIMD packet
784 */
785 static int
786 kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
787 {
788 mimd_t kmimd;
789 uint8_t opcode;
790 uint8_t subopcode;
791
792 mbox64_t *mbox64;
793 mraid_passthru_t __user *upthru32;
794 mraid_passthru_t *kpthru32;
795 mcontroller_t cinfo;
796 mraid_hba_info_t *hinfo;
797
798
799 if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
800 return (-EFAULT);
801
802 opcode = kmimd.ui.fcs.opcode;
803 subopcode = kmimd.ui.fcs.subopcode;
804
805 if (opcode == 0x82) {
806 switch (subopcode) {
807
808 case MEGAIOC_QADAPINFO:
809
810 hinfo = (mraid_hba_info_t *)(unsigned long)
811 kioc->buf_vaddr;
812
813 hinfo_to_cinfo(hinfo, &cinfo);
814
815 if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
816 return (-EFAULT);
817
818 return 0;
819
820 default:
821 return (-EINVAL);
822 }
823
824 return 0;
825 }
826
827 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
828
829 if (kioc->user_pthru) {
830
831 upthru32 = kioc->user_pthru;
832 kpthru32 = kioc->pthru32;
833
834 if (copy_to_user(&upthru32->scsistatus,
835 &kpthru32->scsistatus,
836 sizeof(uint8_t))) {
837 return (-EFAULT);
838 }
839 }
840
841 if (kioc->user_data) {
842 if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
843 kioc->user_data_len)) {
844 return (-EFAULT);
845 }
846 }
847
848 if (copy_to_user(&mimd->mbox[17],
849 &mbox64->mbox32.status, sizeof(uint8_t))) {
850 return (-EFAULT);
851 }
852
853 return 0;
854 }
855
856
857 /**
858 * hinfo_to_cinfo - Convert new format hba info into old format
859 *
860 * @hinfo : New format, more comprehensive adapter info
861 * @cinfo : Old format adapter info to support mimd_t apps
862 */
863 static void
864 hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
865 {
866 if (!hinfo || !cinfo)
867 return;
868
869 cinfo->base = hinfo->baseport;
870 cinfo->irq = hinfo->irq;
871 cinfo->numldrv = hinfo->num_ldrv;
872 cinfo->pcibus = hinfo->pci_bus;
873 cinfo->pcidev = hinfo->pci_slot;
874 cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
875 cinfo->pciid = hinfo->pci_device_id;
876 cinfo->pcivendor = hinfo->pci_vendor_id;
877 cinfo->pcislot = hinfo->pci_slot;
878 cinfo->uid = hinfo->unique_id;
879 }
880
881
882 /*
883 * mraid_mm_register_adp - Registration routine for low level drvrs
884 *
885 * @lld_adp : Adapter object
886 */
887 int
888 mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
889 {
890 mraid_mmadp_t *adapter;
891 mbox64_t *mbox_list;
892 uioc_t *kioc;
893 uint32_t rval;
894 int i;
895
896
897 if (lld_adp->drvr_type != DRVRTYPE_MBOX)
898 return (-EINVAL);
899
900 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
901
902 if (!adapter) {
903 rval = -ENOMEM;
904 goto memalloc_error;
905 }
906
907 memset(adapter, 0, sizeof(mraid_mmadp_t));
908
909 adapter->unique_id = lld_adp->unique_id;
910 adapter->drvr_type = lld_adp->drvr_type;
911 adapter->drvr_data = lld_adp->drvr_data;
912 adapter->pdev = lld_adp->pdev;
913 adapter->issue_uioc = lld_adp->issue_uioc;
914 adapter->timeout = lld_adp->timeout;
915 adapter->max_kioc = lld_adp->max_kioc;
916 adapter->quiescent = 1;
917
918 /*
919 * Allocate single blocks of memory for all required kiocs,
920 * mailboxes and passthru structures.
921 */
922 adapter->kioc_list = kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
923 GFP_KERNEL);
924 adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
925 GFP_KERNEL);
926 adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
927 adapter->pdev,
928 sizeof(mraid_passthru_t),
929 16, 0);
930
931 if (!adapter->kioc_list || !adapter->mbox_list ||
932 !adapter->pthru_dma_pool) {
933
934 con_log(CL_ANN, (KERN_WARNING
935 "megaraid cmm: out of memory, %s %d\n", __FUNCTION__,
936 __LINE__));
937
938 rval = (-ENOMEM);
939
940 goto memalloc_error;
941 }
942
943 /*
944 * Slice kioc_list and make a kioc_pool with the individual kiocs
945 */
946 INIT_LIST_HEAD(&adapter->kioc_pool);
947 spin_lock_init(&adapter->kioc_pool_lock);
948 sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
949
950 mbox_list = (mbox64_t *)adapter->mbox_list;
951
952 for (i = 0; i < lld_adp->max_kioc; i++) {
953
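		/*
		 * Each kioc owns one mbox64 slot from mbox_list and one
		 * DMA-able passthru structure from pthru_dma_pool.
		 */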
954 kioc = adapter->kioc_list + i;
955 kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
956 kioc->pthru32 = pci_pool_alloc(adapter->pthru_dma_pool,
957 GFP_KERNEL, &kioc->pthru32_h);
958
959 if (!kioc->pthru32) {
960
961 con_log(CL_ANN, (KERN_WARNING
962 "megaraid cmm: out of memory, %s %d\n",
963 __FUNCTION__, __LINE__));
964
965 rval = (-ENOMEM);
966
967 goto pthru_dma_pool_error;
968 }
969
970 list_add_tail(&kioc->list, &adapter->kioc_pool);
971 }
972
973 // Setup the dma pools for data buffers
974 if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
975 goto dma_pool_error;
976 }
977
978 list_add_tail(&adapter->list, &adapters_list_g);
979
980 adapters_count_g++;
981
982 return 0;
983
984 dma_pool_error:
985 /* Do nothing */
986
987 pthru_dma_pool_error:
988
989 for (i = 0; i < lld_adp->max_kioc; i++) {
990 kioc = adapter->kioc_list + i;
991 if (kioc->pthru32) {
992 pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
993 kioc->pthru32_h);
994 }
995 }
996
997 memalloc_error:
998
999 if (adapter->kioc_list)
1000 kfree(adapter->kioc_list);
1001
1002 if (adapter->mbox_list)
1003 kfree(adapter->mbox_list);
1004
1005 if (adapter->pthru_dma_pool)
1006 pci_pool_destroy(adapter->pthru_dma_pool);
1007
1008 if (adapter)
1009 kfree(adapter);
1010
1011 return rval;
1012 }
1013
1014
1015 /**
1016 * mraid_mm_adapter_app_handle - return the application handle for this adapter
1017 *
1018 * For the given unique id, locate the adapter in our global list and
1019 * return the corresponding handle, which is also used by applications to
1020 * uniquely identify an adapter.
1021 *
1022 * @param unique_id : adapter unique identifier
1023 *
1024 * @return adapter handle if found in the list
1025 * @return 0 if adapter could not be located, should never happen though
1026 */
1027 uint32_t
1028 mraid_mm_adapter_app_handle(uint32_t unique_id)
1029 {
1030 mraid_mmadp_t *adapter;
1031 mraid_mmadp_t *tmp;
1032 int index = 0;
1033
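	/*
	 * The handle encodes the adapter's position in the global adapter
	 * list; applications pass it back in the mimd packet and GETADAP()
	 * recovers the index.
	 */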
1034 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1035
1036 if (adapter->unique_id == unique_id) {
1037
1038 return MKADAP(index);
1039 }
1040
1041 index++;
1042 }
1043
1044 return 0;
1045 }
1046
1047
1048 /**
1049 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
1050 *
1051 * @adp : Adapter softstate
1052 *
1053 * We maintain a set of dma buffer pools per adapter. Each pool holds one
1054 * buffer; e.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
1055 * buffers. There is just one 4k buffer in the 4k pool, one 8k buffer in
1056 * the 8k pool, and so on. We don't want to waste too much memory by
1057 * allocating more buffers per pool.
1058 */
1059 static int
1060 mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
1061 {
1062 mm_dmapool_t *pool;
1063 int bufsize;
1064 int i;
1065
1066 /*
1067 * Create MAX_DMA_POOLS number of pools
1068 */
1069 bufsize = MRAID_MM_INIT_BUFF_SIZE;
1070
1071 for (i = 0; i < MAX_DMA_POOLS; i++){
1072
1073 pool = &adp->dma_pool_list[i];
1074
1075 pool->buf_size = bufsize;
1076 spin_lock_init(&pool->lock);
1077
1078 pool->handle = pci_pool_create("megaraid mm data buffer",
1079 adp->pdev, bufsize, 16, 0);
1080
1081 if (!pool->handle) {
1082 goto dma_pool_setup_error;
1083 }
1084
1085 pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
1086 &pool->paddr);
1087
1088 if (!pool->vaddr)
1089 goto dma_pool_setup_error;
1090
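		/* each successive pool holds a buffer twice the previous size */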
1091 bufsize = bufsize * 2;
1092 }
1093
1094 return 0;
1095
1096 dma_pool_setup_error:
1097
1098 mraid_mm_teardown_dma_pools(adp);
1099 return (-ENOMEM);
1100 }
1101
1102
1103 /*
1104 * mraid_mm_unregister_adp - Unregister routine for low level drivers
1105 * Assumes there are no outstanding ioctls to the llds.
1106 *
1107 * @unique_id : UID of the adapter
1108 */
1109 int
1110 mraid_mm_unregister_adp(uint32_t unique_id)
1111 {
1112 mraid_mmadp_t *adapter;
1113 mraid_mmadp_t *tmp;
1114
1115 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1116
1117
1118 if (adapter->unique_id == unique_id) {
1119
1120 adapters_count_g--;
1121
1122 list_del_init(&adapter->list);
1123
1124 mraid_mm_free_adp_resources(adapter);
1125
1126 kfree(adapter);
1127
1128 con_log(CL_ANN, (
1129 "megaraid cmm: Unregistered one adapter:%#x\n",
1130 unique_id));
1131
1132 return 0;
1133 }
1134 }
1135
1136 return (-ENODEV);
1137 }
1138
1139 /**
1140 * mraid_mm_free_adp_resources - Free adapter softstate
1141 *
1142 * @adp : Adapter softstate
1143 */
1144 static void
1145 mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1146 {
1147 uioc_t *kioc;
1148 int i;
1149
1150 mraid_mm_teardown_dma_pools(adp);
1151
1152 for (i = 0; i < adp->max_kioc; i++) {
1153
1154 kioc = adp->kioc_list + i;
1155
1156 pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
1157 kioc->pthru32_h);
1158 }
1159
1160 kfree(adp->kioc_list);
1161
1162 kfree(adp->mbox_list);
1163
1164 pci_pool_destroy(adp->pthru_dma_pool);
1165
1166
1167 return;
1168 }
1169
1170
1171 /**
1172 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
1173 *
1174 * @adp : Adapter softstate
1175 */
1176 static void
1177 mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1178 {
1179 int i;
1180 mm_dmapool_t *pool;
1181
1182 for (i = 0; i < MAX_DMA_POOLS; i++) {
1183
1184 pool = &adp->dma_pool_list[i];
1185
1186 if (pool->handle) {
1187
1188 if (pool->vaddr)
1189 pci_pool_free(pool->handle, pool->vaddr,
1190 pool->paddr);
1191
1192 pci_pool_destroy(pool->handle);
1193 pool->handle = NULL;
1194 }
1195 }
1196
1197 return;
1198 }
1199
1200 /**
1201 * mraid_mm_init : Module entry point
1202 */
1203 static int __init
1204 mraid_mm_init(void)
1205 {
1206 // Announce the driver version
1207 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1208 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1209
1210 majorno = register_chrdev(0, "megadev", &lsi_fops);
1211
1212 if (majorno < 0) {
1213 con_log(CL_ANN, ("megaraid cmm: cannot get major\n"));
1214 return majorno;
1215 }
1216
1217 init_waitqueue_head(&wait_q);
1218
1219 INIT_LIST_HEAD(&adapters_list_g);
1220
1221 return 0;
1222 }
1223
1224
1225 /**
1226 * mraid_mm_compat_ioctl : 32bit to 64bit ioctl conversion routine
1227 */
1228 #ifdef CONFIG_COMPAT
1229 static long
1230 mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
1231 unsigned long arg)
1232 {
1233 int err;
1234 lock_kernel();
1235 err = mraid_mm_ioctl(NULL, filep, cmd, arg);
1236 unlock_kernel();
1237 return err;
1238 }
1239 #endif
1240
1241 /**
1242 * mraid_mm_exit : Module exit point
1243 */
1244 static void __exit
1245 mraid_mm_exit(void)
1246 {
1247 	con_log(CL_DLEVEL1, ("exiting common mod\n"));
1248
1249 unregister_chrdev(majorno, "megadev");
1250 }
1251
1252 module_init(mraid_mm_init);
1253 module_exit(mraid_mm_exit);
1254
1255 /* vi: set ts=8 sw=8 tw=78: */