]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Adaptec AAC series RAID controller driver | |
fa195afe | 3 | * (c) Copyright 2001 Red Hat Inc. |
1da177e4 LT |
4 | * |
5 | * based on the old aacraid driver that is.. | |
6 | * Adaptec aacraid device driver for Linux. | |
7 | * | |
e8b12f0f | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
f4babba0 RAR |
9 | * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) |
10 | * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) | |
1da177e4 LT |
11 | * |
12 | * This program is free software; you can redistribute it and/or modify | |
13 | * it under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2, or (at your option) | |
15 | * any later version. | |
16 | * | |
17 | * This program is distributed in the hope that it will be useful, | |
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
20 | * GNU General Public License for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License | |
23 | * along with this program; see the file COPYING. If not, write to | |
24 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | |
25 | * | |
26 | * Module Name: | |
27 | * dpcsup.c | |
28 | * | |
29 | * Abstract: All DPC processing routines for the cyclone board occur here. | |
30 | * | |
31 | * | |
32 | */ | |
33 | ||
34 | #include <linux/kernel.h> | |
35 | #include <linux/init.h> | |
36 | #include <linux/types.h> | |
1da177e4 LT |
37 | #include <linux/spinlock.h> |
38 | #include <linux/slab.h> | |
39 | #include <linux/completion.h> | |
40 | #include <linux/blkdev.h> | |
6188e10d | 41 | #include <linux/semaphore.h> |
1da177e4 LT |
42 | |
43 | #include "aacraid.h" | |
44 | ||
/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting. We will
 *	take a spinlock out on the queue before operating on it.
 *
 *	Returns 0 always.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		int fast;
		/*
		 * The entry addr encodes both pieces of completion info:
		 * bit 0 flags a "fast" response (adapter returned only a
		 * status, not a full FIB) and the fib index sits in the
		 * bits above the low two (hence the >> 2).
		 */
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			/*
			 * Drop the queue lock around the completion/free of
			 * the abandoned fib, then retake it before touching
			 * the queue again.
			 */
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		/* Queue lock is released while the fib itself is processed. */
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
			/*
			 *	Doctor the fib: a fast response carries no FIB
			 *	payload, so synthesize an ST_OK status and mark
			 *	the FIB as processed by the adapter.
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		/*
		 * For NuFileSystem commands, collapse any status with high
		 * bits set down to ST_OK before handing it to the waiter.
		 */
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			__le32 *pstatus = (__le32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	    call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			/* Synchronous request: wake the sleeping sender. */
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				up(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
			/*
			 * done == 2 appears to mean the original waiter has
			 * given up on this fib and left cleanup to us —
			 * TODO(review): confirm against aac_fib_send().
			 */
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		/* Retake the queue lock for the next aac_consumer_get(). */
		spin_lock_irqsave(q->lock, flags);
	}

	/* Book-keeping statistics for this pass over the queue. */
	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
161 | ||
162 | ||
/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all QE there are and wake up all the waiters before exiting.
 *	We will take a spinlock out on the queue before operating on it.
 *
 *	Returns 0 always.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;	/* stack fallback fib */
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		/* entry->addr is a byte offset into the AIF area; convert
		 * it to an index into the mapped array of hw_fibs. */
		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists
		 */
		if (dev->aif_thread)
			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;


		if (dev->aif_thread && fib != &fibctx) {
			/* Heap fib: hand it to the AIF thread for deferred
			 * processing and wake that thread. */
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			/* No AIF thread (or allocation failed): complete the
			 * command straight back to the adapter with ST_OK.
			 * The queue lock is dropped around the completion. */
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
8e0c5ebd | 232 | |
e8b12f0f MR |
233 | /* |
234 | * | |
235 | * aac_aif_callback | |
236 | * @context: the context set in the fib - here it is scsi cmd | |
237 | * @fibptr: pointer to the fib | |
238 | * | |
239 | * Handles the AIFs - new method (SRC) | |
240 | * | |
241 | */ | |
242 | ||
243 | static void aac_aif_callback(void *context, struct fib * fibptr) | |
244 | { | |
245 | struct fib *fibctx; | |
246 | struct aac_dev *dev; | |
247 | struct aac_aifcmd *cmd; | |
248 | int status; | |
249 | ||
250 | fibctx = (struct fib *)context; | |
251 | BUG_ON(fibptr == NULL); | |
252 | dev = fibptr->dev; | |
253 | ||
3ffd6c5a RAR |
254 | if ((fibptr->hw_fib_va->header.XferState & |
255 | cpu_to_le32(NoMoreAifDataAvailable)) || | |
256 | dev->sa_firmware) { | |
e8b12f0f MR |
257 | aac_fib_complete(fibptr); |
258 | aac_fib_free(fibptr); | |
259 | return; | |
260 | } | |
261 | ||
262 | aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va); | |
263 | ||
264 | aac_fib_init(fibctx); | |
265 | cmd = (struct aac_aifcmd *) fib_data(fibctx); | |
266 | cmd->command = cpu_to_le32(AifReqEvent); | |
267 | ||
268 | status = aac_fib_send(AifRequest, | |
269 | fibctx, | |
270 | sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), | |
271 | FsaNormal, | |
272 | 0, 1, | |
273 | (fib_callback)aac_aif_callback, fibctx); | |
274 | } | |
275 | ||
8e0c5ebd MH |
276 | |
/**
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@index: completion reference (fib index for responses; for AIFs on
 *		non-SA firmware, an offset/reference into adapter space —
 *		see the isAif == 1 branch)
 *	@isAif: 0 = normal response, 1 = common AIF, 2 = new (SRC) AIF
 *	@isFastResponse: non-zero if the adapter returned only a status
 *	@aif_fib: optional source hw_fib for the AIF data, may be NULL
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
	int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;
	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
	if (isAif == 1) {	/* AIF - common */
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		/*
		 *	Allocate a FIB. For non queued stuff we can just use
		 * the stack so we are happy. We need a fib object in order to
		 * manage the linked lists.
		 */
		if ((!dev->aif_thread)
		 || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
			kfree (fib);
			return 1;
		}
		/*
		 * Populate the hw_fib from whichever source applies:
		 * SA firmware passes only an event type in @index, an
		 * explicit @aif_fib is copied directly, otherwise the data
		 * is copied out of adapter register space at offset @index.
		 */
		if (dev->sa_firmware) {
			fib->hbacmd_size = index;	/* store event type */
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		/* Queue the AIF for the aif_thread and wake it. */
		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		/* Arm a fresh AIF request; aac_aif_callback re-arms it on
		 * each completion. */
		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *) fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
			fibctx,
			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
			FsaNormal,
			0, 1,
			(fib_callback)aac_aif_callback, fibctx);
	} else {
		/* Normal command response: @index selects the fib directly. */
		struct fib *fib = &dev->fibs[index];
		int start_callback = 0;

		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
			/* Native HBA path: no FIB header doctoring needed. */

			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

			if (fib->callback) {
				start_callback = 1;
			} else {
				/* Synchronous request: wake the waiter.
				 * done == 2 appears to mean the waiter gave
				 * up and left completion to us —
				 * TODO(review): confirm against
				 * aac_fib_send(). */
				unsigned long flagv;
				int complete = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					complete = 1;
				} else {
					fib->done = 1;
					up(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (complete)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;

			if (isFastResponse) {
				/* Doctor the fib: synthesize ST_OK since a
				 * fast response carries no payload. */
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}

			/* NuFileSystem: collapse high-bit statuses to ST_OK. */
			if (hwfib->header.Command ==
				cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
				cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected))
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				else
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				start_callback = 1;
			} else {
				/* Synchronous request: wake the waiter (see
				 * the done == 2 note above). */
				unsigned long flagv;
				int complete = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					complete = 1;
				} else {
					fib->done = 1;
					up(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (complete)
					aac_fib_complete(fib);
			}
		}


		if (start_callback) {
			/*
			 *	NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}

		}
		return 0;
	}
}