/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

/**
 * ioctl_send_fib	-	send a FIB from userspace
 * @dev:	adapter being processed
 * @arg:	arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE

static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * kfib;
	struct fib *fibptr;
	struct hw_fib * hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
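	/*
	 * The caller asked for more than the preallocated FIB can hold,
	 * so switch to a dedicated DMA buffer, capped at 2048 bytes.
	 */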
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy */
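	/*
	 * The buffer was fetched from user space twice, so the header
	 * fields may have changed between the copies; reject anything
	 * that no longer matches the sizes the allocation was based on.
	 */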
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to allow
		 * cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}

/**
 * open_getadapter_fib - open an adapter fib context
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine creates and registers a new AdapterFibContext on the
 * adapter, through which the caller can later read incoming fibs.
 */

static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the semaphore used to wait for the next AIF.
		 */
		sema_init(&fibctx->wait_sem, 0);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
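		/*
		 * Creation time in seconds, presumably consumed elsewhere
		 * in the driver to age out idle contexts.
		 */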
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
				 sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */

static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
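		/* Drop the lock before copy_to_user(), which may fault and sleep */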
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (down_interruptible(&fibctx->wait_sem) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}

int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
	struct fib *fib;

	/*
	 * First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 * Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 * Invalidate context
	 */
	fibctx->type = 0;
	/*
	 * Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib - close down user fib context
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */

static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head * entry;

	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 * check_revision - check the driver version
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */

static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
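	/*
	 * version now holds the major number in the top byte, the minor
	 * in the next, and 0x0400 plus the patch level in the low word.
	 */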
	response.version = cpu_to_le32(version);
# ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
# else
	response.build = cpu_to_le32(9999);
# endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}

/**
 * aac_send_raw_srb - send a raw SRB from userspace to the adapter
 * @dev: adapter
 * @arg: ioctl arguments
 */

static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
	struct fib* srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct aac_hba_cmd_req *hbacmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	u32 chn;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
	void *sg_list[HBA_MAX_SG_EMBEDDED];
	u32 sg_count[HBA_MAX_SG_EMBEDDED];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	int is_native_device;
	u64 address;

	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 * Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}

	memset(sg_list, 0, sizeof(sg_list)); /* so cleanup never frees stale pointers */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(user_srbcmd, user_srb, fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	flags = user_srbcmd->flags; /* from user in cpu order */
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
			user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid: SG with no direction specified\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
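	/*
	 * Two candidate sizes: one assuming 32-bit sg entries, one assuming
	 * 64-bit entries. Whichever matches the size the caller passed in
	 * tells us which sgmap layout the user buffer is using.
	 */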
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
		  "Raw SRB command calculated fibsize=%lu;%lu "
		  "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
		  "issued fibsize=%d\n",
		  actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
		  sizeof(struct aac_srb), sizeof(struct sgentry),
		  sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}

	chn = aac_logical_to_phys(user_srbcmd->channel);
	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
		dev->hba_map[chn][user_srbcmd->id].devtype ==
		AAC_DEVTYPE_NATIVE_RAW) {
		is_native_device = 1;
		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
		memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */

		/* iu_type is a parameter of aac_hba_send */
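		/* byte1 appears to encode transfer direction: 2 = out, 1 = in */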
		switch (data_dir) {
		case DMA_TO_DEVICE:
			hbacmd->byte1 = 2;
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			hbacmd->byte1 = 1;
			break;
		case DMA_NONE:
		default:
			break;
		}
		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;

		/*
		 * we fill in reply_qid later in aac_src_deliver_message
		 * we fill in iu_type, request_id later in aac_hba_send
		 * we fill in emb_data_desc_count, data_length later
		 * in sg list build
		 */

		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));

		address = (u64)srbfib->hw_error_pa;
		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		hbacmd->emb_data_desc_count =
					cpu_to_le32(user_srbcmd->sg.count);
		srbfib->hbacmd_size = 64 +
			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);

	} else {
		is_native_device = 0;
		aac_fib_init(srbfib);

		/* raw_srb FIB is not FastResponseCapable */
		srbfib->hw_fib_va->header.XferState &=
			~cpu_to_le32(FastResponseCapable);

		srbcmd = (struct aac_srb *) fib_data(srbfib);

		/* Fix up srb for endian and force some values */

		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); /* Force this */
		srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
		srbcmd->id = cpu_to_le32(user_srbcmd->id);
		srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
		srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
		srbcmd->flags = cpu_to_le32(flags);
		srbcmd->retry_limit = 0; /* Obsolete parameter */
		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
	}

	byte_count = 0;
	if (is_native_device) {
		struct user_sgmap *usg32 = &user_srbcmd->sg;
		struct user_sgmap64 *usg64 =
			(struct user_sgmap64 *)&user_srbcmd->sg;

		for (i = 0; i < usg32->count; i++) {
			void *p;
			u64 addr;

			sg_count[i] = (actual_fibsize64 == fibsize) ?
				usg64->sg[i].count : usg32->sg[i].count;
			if (sg_count[i] >
				(dev->scsi_host_ptr->max_sectors << 9)) {
				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
					i, sg_count[i],
					dev->scsi_host_ptr->max_sectors << 9);
				rcode = -EINVAL;
				goto cleanup;
			}

			p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
			if (!p) {
				rcode = -ENOMEM;
				goto cleanup;
			}

			if (actual_fibsize64 == fibsize) {
				addr = (u64)usg64->sg[i].addr[0];
				addr += ((u64)usg64->sg[i].addr[1]) << 32;
			} else {
				addr = (u64)usg32->sg[i].addr;
			}

			sg_user[i] = (void __user *)(uintptr_t)addr;
			sg_list[i] = p; /* save so we can clean up later */
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						   sg_count[i])) {
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = pci_map_single(dev->pdev, p, sg_count[i],
					      data_dir);
			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
			hbacmd->sge[i].addr_lo = cpu_to_le32(
						(u32)(addr & 0xffffffff));
			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += sg_count[i];
		}

		if (usg32->count > 0)	/* embedded sglist */
			hbacmd->sge[usg32->count-1].flags =
				cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);

		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
					NULL, NULL);

	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
		struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void* p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
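			/*
			 * The caller used the 32-bit sgmap layout; duplicate
			 * it and expand each entry into the 64-bit format the
			 * adapter expects.
			 */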
			struct user_sgmap* usg;
			usg = kmemdup(upsg,
				      actual_fibsize - sizeof(struct aac_srb)
				      + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void* p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						kfree(usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand64, srbfib,
				actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	} else {
		struct user_sgmap* upsg = &user_srbcmd->sg;
		struct sgmap* psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
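			/*
			 * The caller used 64-bit sg entries even though the
			 * adapter wants the 32-bit layout; map each buffer
			 * and keep the low 32 bits of the bus address.
			 */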
			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void* p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						usg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void* p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; /* save so we can clean up later */
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand, srbfib,
				actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}

	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

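	/* the reply area immediately follows the SRB in the user buffer */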
	user_reply = arg + fibsize;
	if (is_native_device) {
		struct aac_hba_resp *err =
			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
		struct aac_srb_reply reply;

		memset(&reply, 0, sizeof(reply)); /* don't leak stack to user space */
		reply.status = ST_OK;
		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
			/* fast response */
			reply.srb_status = SRB_STATUS_SUCCESS;
			reply.scsi_status = 0;
			reply.data_xfer_length = byte_count;
		} else {
			reply.srb_status = err->service_response;
			reply.scsi_status = err->status;
			reply.data_xfer_length = byte_count -
				le32_to_cpu(err->residual_count);
			reply.sense_data_size = err->sense_response_data_len;
			memcpy(reply.sense_data, err->sense_response_buf,
				AAC_SENSE_BUFFERSIZE);
		}
		if (copy_to_user(user_reply, &reply,
			sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	} else {
		struct aac_srb_reply *reply;

		reply = (struct aac_srb_reply *) fib_data(srbfib);
		if (copy_to_user(user_reply, reply,
			sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	}

cleanup:
	kfree(user_srbcmd);
	if (rcode != -ERESTARTSYS) {
		for (i = 0; i <= sg_indx; i++)
			kfree(sg_list[i]);
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}

struct aac_pci_info {
	u32 bus;
	u32 slot;
};


static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_hba_info hbainfo;

	memset(&hbainfo, 0, sizeof(hbainfo)); /* don't leak stack to user space */
	hbainfo.adapter_number = (u8) dev->id;
	hbainfo.system_io_bus_number = dev->pdev->bus->number;
	hbainfo.device_number = (dev->pdev->devfn >> 3);
	hbainfo.function_number = (dev->pdev->devfn & 0x0007);

	hbainfo.vendor_id = dev->pdev->vendor;
	hbainfo.device_id = dev->pdev->device;
	hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
	hbainfo.sub_system_id = dev->pdev->subsystem_device;

	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}

	return 0;
}

struct aac_reset_iop {
	u8 reset_type;
};

static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	return retval;
}

int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 * HBA-specific ioctls get first crack; -ENOTTY means the command
	 * was not claimed there and falls through to the generic handlers.
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;

	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}