/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <asm/semaphore.h>

#include "aacraid.h"
/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa))==NULL)
		return -ENOMEM;
	return 0;
}
/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}
/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
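/*
 * Illustrative sketch, not part of the driver: the loop above lays the
 * hardware fibs out back to back inside the single DMA-coherent region
 * obtained by fib_map_alloc(), so the n-th driver fib's hardware fib can
 * also be reached by simple pointer arithmetic.  The helper name below is
 * hypothetical and only demonstrates the layout.
 */
static inline struct hw_fib *example_nth_hw_fib(struct aac_dev *dev, int n)
{
	/* each slot is dev->max_fib_size bytes, starting at dev->hw_fib_va */
	return (struct hw_fib *)((unsigned char *)dev->hw_fib_va
				 + n * dev->max_fib_size);
}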
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	a transaction
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void *)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}
/*
 *	Communication primitives define and support the queuing method we use to
 *	support host to adapter communication. All queue accesses happen through
 *	these routines and are the only routines which have a knowledge of
 *	how these queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
 *	is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
 *	returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@priority: Priority of fib
 *	@fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/*  if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and send a notify
	 *	to tell the adapter a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
	q = &dev->queues->queue[AdapNormCmdQueue];

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;
		aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		q->numpending++;
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}
	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptable command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			spin_lock_irqsave(&fibptr->event_lock, flags);
			if (fibptr->done == 0) {
				fibptr->done = 2; /* Tell interrupt we aborted */
				spin_unlock_irqrestore(&fibptr->event_lock, flags);
				return -EINTR;
			}
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		}
		BUG_ON(fibptr->done == 0);

		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the user does not want a response then return success, otherwise
	 *	the wait above has already collected it.
	 */
	return 0;
}
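/*
 * Illustrative sketch, not part of the driver: the usual synchronous fib
 * round trip built from the helpers in this file.  The command constant
 * (ContainerCommand), priority (FsaNormal) and request size are placeholders
 * taken from typical callers; a real caller fills a specific request
 * structure into fib_data(fibptr) before sending and reads the response from
 * the same buffer afterwards.
 */
static int example_fib_roundtrip(struct aac_dev *dev)
{
	struct fib *fibptr = aac_fib_alloc(dev);
	int status;

	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);
	/* ... build the request in fib_data(fibptr) here ... */
	status = aac_fib_send(ContainerCommand, fibptr,
			      sizeof(struct aac_query_mount),
			      FsaNormal, 1, 1, NULL, NULL);
	/* on success the response is now in fib_data(fibptr) */
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	return status;
}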
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}
/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
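/*
 * Illustrative sketch, not part of the driver: how a host-side queue is
 * typically drained with the two consumer helpers above.  The real consumers
 * live in the response/command DPC paths (dpcsup.c); the processing step here
 * is a placeholder comment and the function name is hypothetical.
 */
static void example_drain_queue(struct aac_dev *dev, struct aac_queue *q, u32 qid)
{
	struct aac_entry *entry;

	while (aac_consumer_get(dev, q, &entry)) {
		/* ... look up and complete the fib referenced by entry->addr ... */
		aac_consumer_free(dev, q, qid);
	}
}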
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;
	u32 index;

	if (hw_fib->header.XferState == 0) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->new_comm_interface) {
			kfree(hw_fib);
		} else {
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			size += sizeof(struct aac_fibhdr);
			if (size > le16_to_cpu(hw_fib->header.SenderSize))
				return -EMSGSIZE;
			hw_fib->header.Size = cpu_to_le16(size);

			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fib: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	}
	return 0;
}
/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;
	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 *	address. Make sure we have the right array, and if
			 *	so set the flag to initiate a new re-config once we
			 *	see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;
:
873 switch (le32_to_cpu(((u32
*)aifcmd
->data
)[0])) {
877 case AifEnAddContainer
:
878 container
= le32_to_cpu(((u32
*)aifcmd
->data
)[1]);
879 if (container
>= dev
->maximum_num_containers
)
881 dev
->fsa_dev
[container
].config_needed
= ADD
;
882 dev
->fsa_dev
[container
].config_waiting_on
=
884 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
890 case AifEnDeleteContainer
:
891 container
= le32_to_cpu(((u32
*)aifcmd
->data
)[1]);
892 if (container
>= dev
->maximum_num_containers
)
894 dev
->fsa_dev
[container
].config_needed
= DELETE
;
895 dev
->fsa_dev
[container
].config_waiting_on
=
897 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
901 * Container change detected. If we currently are not
902 * waiting on something else, setup to wait on a Config Change.
904 case AifEnContainerChange
:
905 container
= le32_to_cpu(((u32
*)aifcmd
->data
)[1]);
906 if (container
>= dev
->maximum_num_containers
)
908 if (dev
->fsa_dev
[container
].config_waiting_on
&&
909 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
911 dev
->fsa_dev
[container
].config_needed
= CHANGE
;
912 dev
->fsa_dev
[container
].config_waiting_on
=
914 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
917 case AifEnConfigChange
:
923 * If we are waiting on something and this happens to be
924 * that thing then set the re-configure flag.
926 if (container
!= (u32
)-1) {
927 if (container
>= dev
->maximum_num_containers
)
929 if ((dev
->fsa_dev
[container
].config_waiting_on
==
930 le32_to_cpu(*(u32
*)aifcmd
->data
)) &&
931 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
932 dev
->fsa_dev
[container
].config_waiting_on
= 0;
933 } else for (container
= 0;
934 container
< dev
->maximum_num_containers
; ++container
) {
935 if ((dev
->fsa_dev
[container
].config_waiting_on
==
936 le32_to_cpu(*(u32
*)aifcmd
->data
)) &&
937 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
938 dev
->fsa_dev
[container
].config_waiting_on
= 0;
942 case AifCmdJobProgress
:
944 * These are job progress AIF's. When a Clear is being
945 * done on a container it is initially created then hidden from
946 * the OS. When the clear completes we don't get a config
947 * change so we monitor the job status complete on a clear then
948 * wait for a container change.
951 if ((((u32
*)aifcmd
->data
)[1] == cpu_to_le32(AifJobCtrZero
))
952 && ((((u32
*)aifcmd
->data
)[6] == ((u32
*)aifcmd
->data
)[5])
953 || (((u32
*)aifcmd
->data
)[4] == cpu_to_le32(AifJobStsSuccess
)))) {
955 container
< dev
->maximum_num_containers
;
958 * Stomp on all config sequencing for all
961 dev
->fsa_dev
[container
].config_waiting_on
=
962 AifEnContainerChange
;
963 dev
->fsa_dev
[container
].config_needed
= ADD
;
964 dev
->fsa_dev
[container
].config_waiting_stamp
=
968 if ((((u32
*)aifcmd
->data
)[1] == cpu_to_le32(AifJobCtrZero
))
969 && (((u32
*)aifcmd
->data
)[6] == 0)
970 && (((u32
*)aifcmd
->data
)[4] == cpu_to_le32(AifJobStsRunning
))) {
972 container
< dev
->maximum_num_containers
;
975 * Stomp on all config sequencing for all
978 dev
->fsa_dev
[container
].config_waiting_on
=
979 AifEnContainerChange
;
980 dev
->fsa_dev
[container
].config_needed
= DELETE
;
981 dev
->fsa_dev
[container
].config_waiting_stamp
=
	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    container++) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;
	/*
	 *	If we decided that a re-configuration needs to be done,
	 *	schedule it here on the way out the door, please close the door
	 *	behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 *	and mark it as changed, invalidating the cache. This deals
	 *	with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			scsi_remove_device(device);
			break;
		case CHANGE:
			if (!dev->fsa_dev[container].valid) {
				scsi_remove_device(device);
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
			CONTAINER_TO_CHANNEL(container),
			CONTAINER_TO_ID(container),
			CONTAINER_TO_LUN(container));
	}
}
static int _aac_reset_adapter(struct aac_dev *aac)
{
	int index, quirks;
	u32 ret;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;

	/*
	 * Assumptions:
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	spin_unlock_irq(host->host_lock);
	kthread_stop(aac->thread);

	/*
	 *	If a positive health, means in a known DEAD PANIC
	 * state and the adapter could be reset to `try again'.
	 */
	retval = aac_adapter_check_health(aac);
	if (retval == 0)
		retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
	if (retval)
		retval = aac_adapter_sync_cmd(aac, IOP_RESET,
		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);

	if (retval)
		goto out;
	if (ret != 0x00000001) {
		retval = -ENODEV;
		goto out;
	}

	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	aac->hw_fib_va = NULL;
	aac->hw_fib_pa = 0;
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	free_irq(aac->pdev->irq, aac);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
			goto out;
	}
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
			goto out;
	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
	if (IS_ERR(aac->thread)) {
		retval = PTR_ERR(aac->thread);
		goto out;
	}
	(void)aac_get_adapter_info(aac);
	quirks = aac_get_driver_ident(index)->quirks;
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	retval = 0;

out:
	scsi_unblock_requests(host);
	spin_lock_irq(host->host_lock);
	return retval;
}
int aac_check_health(struct aac_dev * aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head * entry;
	struct Scsi_Host * host;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0;
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac.aifcmd.data[2] = AifHighPriority = 3
	 *	aac.aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib * hw_fib;
		struct fib * fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd * aif;

			memset(hw_fib, 0, sizeof(struct hw_fib));
			memset(fib, 0, sizeof(struct fib));
			fib->hw_fib = hw_fib;
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof (struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			aif->data[0] = cpu_to_le32(AifEnExpEvent);
			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
			aif->data[2] = cpu_to_le32(AifHighPriority);
			aif->data[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

	host = aac->scsi_host_ptr;
	spin_lock_irqsave(host->host_lock, flagv);
	BlinkLED = _aac_reset_adapter(aac);
	spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}
/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while(1)
	{
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We Really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof( struct fib );
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else if (hw_fib_pool) {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}