/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */

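/*
 * Watchdog for VL15 (subnet management) packets on the SDMA queue.
 * A roughly 50ms timer ((HZ + 19) / 20 jiffies) runs while any VL15
 * request is outstanding; if it fires with requests still queued, all
 * sends are cancelled and ipath_hol_down() is invoked to recover.
 */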
static void vl15_watchdog_enq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
		unsigned long interval = (HZ + 19) / 20;
		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
		add_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_deq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
		unsigned long interval = (HZ + 19) / 20;
		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
	} else {
		del_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_timeout(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
		ipath_dbg("vl15 watchdog timeout - clearing\n");
		ipath_cancel_sends(dd, 1);
		ipath_hol_down(dd);
	} else {
		ipath_dbg("vl15 watchdog timeout - "
			"condition already cleared\n");
	}
}

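/*
 * Undo the dma_map_single() for one descriptor.  The layout mirrors
 * make_sdma_desc() below: the 48-bit bus address is split across the
 * two quadwords, and the length is the 11-bit dword count in bits
 * [26:16] of qw0, converted back to bytes.
 */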
static void unmap_desc(struct ipath_devdata *dd, unsigned head)
{
	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

/*
 * Advance our SDMA queue head to match the chip's, retiring completed
 * descriptors along the way: unmap any descriptor whose submitter set
 * IPATH_SDMA_TXREQ_F_FREEDESC, move finished requests to the notify
 * list, and schedule the notify tasklet.  Returns 1 if any progress
 * was made.
 *
 * ipath_sdma_lock should be locked before calling this.
 */
int ipath_sdma_make_progress(struct ipath_devdata *dd)
{
	struct list_head *lp = NULL;
	struct ipath_sdma_txreq *txp = NULL;
	u16 dmahead;
	u16 start_idx = 0;
	int progress = 0;

	if (!list_empty(&dd->ipath_sdma_activelist)) {
		lp = dd->ipath_sdma_activelist.next;
		txp = list_entry(lp, struct ipath_sdma_txreq, list);
		start_idx = txp->start_idx;
	}

	/*
	 * Read the SDMA head register in order to know that the
	 * interrupt clear has been written to the chip.
	 * Otherwise, we may not get an interrupt for the last
	 * descriptor in the queue.
	 */
	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
	/* sanity check return value for error handling (chip reset, etc.) */
	if (dmahead >= dd->ipath_sdma_descq_cnt)
		goto done;

	while (dd->ipath_sdma_descq_head != dmahead) {
		if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
		    dd->ipath_sdma_descq_head == start_idx) {
			unmap_desc(dd, dd->ipath_sdma_descq_head);
			start_idx++;
			if (start_idx == dd->ipath_sdma_descq_cnt)
				start_idx = 0;
		}

		/* increment free count and head */
		dd->ipath_sdma_descq_removed++;
		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
			dd->ipath_sdma_descq_head = 0;

		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
			/* move to notify list */
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(lp, &dd->ipath_sdma_notifylist);
			if (!list_empty(&dd->ipath_sdma_activelist)) {
				lp = dd->ipath_sdma_activelist.next;
				txp = list_entry(lp, struct ipath_sdma_txreq,
						 list);
				start_idx = txp->start_idx;
			} else {
				lp = NULL;
				txp = NULL;
			}
		}
		progress = 1;
	}

	if (progress)
		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

done:
	return progress;
}

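/*
 * Run the completion callback, if any, for each retired request on
 * the private list.  Called with no locks held, so callbacks are free
 * to queue further work.
 */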
static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
{
	struct ipath_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, list, list) {
		list_del_init(&txp->list);

		if (txp->callback)
			(*txp->callback)(txp->callback_cookie,
					 txp->callback_status);
	}
}

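/*
 * Splice the notify list onto a private list under the lock, then
 * invoke the callbacks with the lock dropped, so a callback may post
 * new sends without deadlocking on ipath_sdma_lock.
 */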
static void sdma_notify_taskbody(struct ipath_devdata *dd)
{
	unsigned long flags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	list_splice_init(&dd->ipath_sdma_notifylist, &list);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	ipath_sdma_notify(dd, &list);

	/*
	 * The IB verbs layer needs to see the callback before getting
	 * the call to ipath_ib_piobufavail() because the callback
	 * handles releasing resources the next send will need.
	 * Otherwise, we could do these calls in
	 * ipath_sdma_make_progress().
	 */
	ipath_ib_piobufavail(dd->verbs_dev);
}

static void sdma_notify_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		sdma_notify_taskbody(dd);
}

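/* Dump the SDMA-related chip registers at VERBOSE debug level. */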
static void dump_sdma_state(struct ipath_devdata *dd)
{
	unsigned long reg;

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
}

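/*
 * Drive the SDMA abort state machine; reschedules itself until the
 * abort completes:
 *   IPATH_SDMA_ABORT_NONE     - nothing to do
 *   IPATH_SDMA_ABORT_DISARMED - disarm done, waiting for the
 *                               SDMADISABLED interrupt (with a timeout
 *                               in case the interrupt is lost)
 *   IPATH_SDMA_ABORT_ABORTED  - engine stopped; flush the active list,
 *                               reset the ring state, and leave SDMA
 *                               disabled until the link next goes
 *                               ACTIVE (see ipath_restart_sdma())
 */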
static void sdma_abort_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u64 status;
	unsigned long flags;

	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		return;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;

	/* nothing to do */
	if (status == IPATH_SDMA_ABORT_NONE)
		goto unlock;

	/* ipath_sdma_abort() is done, waiting for interrupt */
	if (status == IPATH_SDMA_ABORT_DISARMED) {
		if (jiffies < dd->ipath_sdma_abort_intr_timeout)
			goto resched_noprint;
		/* give up, intr got lost somewhere */
		ipath_dbg("give up waiting for SDMADISABLED intr\n");
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		status = IPATH_SDMA_ABORT_ABORTED;
	}

	/* everything is stopped, time to clean up and restart */
	if (status == IPATH_SDMA_ABORT_ABORTED) {
		struct ipath_sdma_txreq *txp, *txpnext;
		u64 hwstatus;
		int notify = 0;

		hwstatus = ipath_read_kreg64(dd,
				dd->ipath_kregs->kr_senddmastatus);

		if (/* ScoreBoardDrainInProg */
		    test_bit(63, &hwstatus) ||
		    /* AbortInProg */
		    test_bit(62, &hwstatus) ||
		    /* InternalSDmaEnable */
		    test_bit(61, &hwstatus) ||
		    /* ScbEmpty */
		    !test_bit(30, &hwstatus)) {
			if (dd->ipath_sdma_reset_wait > 0) {
				/* not done shutting down sdma */
				--dd->ipath_sdma_reset_wait;
				goto resched;
			}
			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
				"status after SDMA reset, continuing\n");
			dump_sdma_state(dd);
		}

		/* dequeue all "sent" requests */
		list_for_each_entry_safe(txp, txpnext,
					 &dd->ipath_sdma_activelist, list) {
			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
			notify = 1;
		}
		if (notify)
			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

		/* reset our notion of head and tail */
		dd->ipath_sdma_descq_tail = 0;
		dd->ipath_sdma_descq_head = 0;
		dd->ipath_sdma_head_dma[0] = 0;
		dd->ipath_sdma_generation = 0;
		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

		/* Reset SendDmaLenGen */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
			(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));

		/* done with sdma state for a bit */
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

		/*
		 * Don't restart sdma here. Wait until link is up to ACTIVE.
		 * VL15 MADs used to bring the link up use PIO, and multiple
		 * link transitions otherwise cause the sdma engine to be
		 * stopped and started multiple times.
		 * The disable is done here, including the shadow, so the
		 * state is kept consistent.
		 * See ipath_restart_sdma() for the actual starting of sdma.
		 */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

		/* make sure I see next message */
		dd->ipath_sdma_abort_jiffies = 0;

		goto done;
	}

resched:
	/*
	 * for now, keep spinning
	 * JAG - this is bad to just have default be a loop without
	 * state change
	 */
	if (jiffies > dd->ipath_sdma_abort_jiffies) {
		ipath_dbg("looping with status 0x%016llx\n",
			  dd->ipath_sdma_status);
		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
	}
resched_noprint:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	return;

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
done:
	return;
}

/*
 * This is called from interrupt context.
 */
void ipath_sdma_intr(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	(void) ipath_sdma_make_progress(dd);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
}

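/*
 * Allocate the coherent DMA memory SDMA needs: one page for the
 * descriptor ring, and one page the chip DMAs its head index into.
 * Also sets up the VL15 watchdog timer.
 */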
static int alloc_sdma(struct ipath_devdata *dd)
{
	int ret = 0;

	/* Allocate memory for SendDMA descriptor FIFO */
	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
			"FIFO memory\n");
		ret = -ENOMEM;
		goto done;
	}

	dd->ipath_sdma_descq_cnt =
		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

	/* Allocate memory for DMA of head register to memory */
	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
	if (!dd->ipath_sdma_head_dma) {
		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
		ret = -ENOMEM;
		goto cleanup_descq;
	}
	dd->ipath_sdma_head_dma[0] = 0;

	init_timer(&dd->ipath_sdma_vl15_timer);
	dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
	dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
	atomic_set(&dd->ipath_sdma_vl15_count, 0);

	goto done;

cleanup_descq:
	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
	dd->ipath_sdma_descq = NULL;
	dd->ipath_sdma_descq_phys = 0;
done:
	return ret;
}

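/*
 * One-time SDMA initialization: allocate the rings, program the
 * chip's view of the ring (base, length/generation, tail, head
 * address), hand the former kernel PIO buffers over to SDMA, and set
 * up the notify and abort tasklets.  The engine itself is left off;
 * only its interrupt is enabled here.
 */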
int setup_sdma(struct ipath_devdata *dd)
{
	int ret = 0;
	unsigned i, n;
	u64 tmp64;
	u64 senddmabufmask[3] = { 0 };
	unsigned long flags;

	ret = alloc_sdma(dd);
	if (ret)
		goto done;

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "SendDMA memory not allocated\n");
		goto done;
	}

	dd->ipath_sdma_status = 0;
	dd->ipath_sdma_abort_jiffies = 0;
	dd->ipath_sdma_generation = 0;
	dd->ipath_sdma_descq_tail = 0;
	dd->ipath_sdma_descq_head = 0;
	dd->ipath_sdma_descq_removed = 0;
	dd->ipath_sdma_descq_added = 0;

	/* Set SendDmaBase */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
			 dd->ipath_sdma_descq_phys);
	/* Set SendDmaLenGen */
	tmp64 = dd->ipath_sdma_descq_cnt;
	tmp64 |= 1<<18; /* enable generation checking */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
	/* Set SendDmaTail */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
			 dd->ipath_sdma_descq_tail);
	/* Set SendDmaHeadAddr */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
			 dd->ipath_sdma_head_phys);

	/* Reserve all the former "kernel" piobufs */
	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
	for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;
		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
		n - dd->ipath_lastport_piobuf, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
			 senddmabufmask[0]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
			 senddmabufmask[1]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
			 senddmabufmask[2]);

	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);

	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
		     (unsigned long) dd);
	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
		     (unsigned long) dd);

	/*
	 * No use to turn on SDMA here, as link is probably not ACTIVE
	 * Just mark it RUNNING and enable the interrupt, and let the
	 * ipath_restart_sdma() on link transition to ACTIVE actually
	 * enable it.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

done:
	return ret;
}

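/*
 * Shut SDMA down for good: mark the engine SHUTDOWN, kill the
 * tasklets, disable the engine in the chip, complete everything still
 * queued with IPATH_SDMA_TXREQ_S_SHUTDOWN status, zero the chip
 * registers, and only then free the DMA memory (outside the lock).
 */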
void teardown_sdma(struct ipath_devdata *dd)
{
	struct ipath_sdma_txreq *txp, *txpnext;
	unsigned long flags;
	dma_addr_t sdma_head_phys = 0;
	dma_addr_t sdma_descq_phys = 0;
	void *sdma_descq = NULL;
	void *sdma_head_dma = NULL;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	tasklet_kill(&dd->ipath_sdma_abort_task);
	tasklet_kill(&dd->ipath_sdma_notify_task);

	/* turn off sdma */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	/* dequeue all "sent" requests */
	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
				 list) {
		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
			vl15_watchdog_deq(dd);
		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	sdma_notify_taskbody(dd);

	del_timer_sync(&dd->ipath_sdma_vl15_timer);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	dd->ipath_sdma_abort_jiffies = 0;

	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);

	if (dd->ipath_sdma_head_dma) {
		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
		sdma_head_phys = dd->ipath_sdma_head_phys;
		dd->ipath_sdma_head_dma = NULL;
		dd->ipath_sdma_head_phys = 0;
	}

	if (dd->ipath_sdma_descq) {
		sdma_descq = dd->ipath_sdma_descq;
		sdma_descq_phys = dd->ipath_sdma_descq_phys;
		dd->ipath_sdma_descq = NULL;
		dd->ipath_sdma_descq_phys = 0;
	}

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	if (sdma_head_dma)
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
			sdma_head_dma, sdma_head_phys);

	if (sdma_descq)
		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
			sdma_descq, sdma_descq_phys);
}

/*
 * [Re]start SDMA, if we use it, and it's not already OK.
 * This is called on transition to link ACTIVE, either the first or
 * subsequent times.
 */
void ipath_restart_sdma(struct ipath_devdata *dd)
{
	unsigned long flags;
	int needed = 1;

	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
		goto bail;

	/*
	 * First, make sure we should, which is to say,
	 * check that we are "RUNNING" (not in teardown)
	 * and not "SHUTDOWN"
	 */
	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
		|| test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		needed = 0;
	else {
		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!needed) {
		ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
			dd->ipath_sdma_status);
		goto bail;
	}
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	/*
	 * First clear, just to be safe. Enable is only done
	 * in chip on 0->1 transition
	 */
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

bail:
	return;
}

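/*
 * Build the two quadwords of a hardware send descriptor in host byte
 * order (unmap_desc() above performs the inverse unpacking of qw0):
 *
 *   qw0 [63:32] SDmaPhyAddr[31:0]    low half of the bus address
 *       [31:30] SDmaGeneration[1:0]  ring generation, chip-checked
 *       [26:16] SDmaDwordCount[10:0] payload length in dwords
 *       [15]    SDmaIntReq           [14] SDmaUseLargeBuf
 *       [12]    SDmaFirstDesc        [11] SDmaLastDesc
 *       [10:0]  SDmaBufOffset[12:2]  dword offset into the send buffer
 *   qw1 [15:0]  SDmaPhyAddr[47:32]
 *
 * The flag bits are ORed in by the callers below.
 */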
static inline void make_sdma_desc(struct ipath_devdata *dd,
	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
	struct ipath_sge_state *ss, u32 dwords,
	struct ipath_verbs_txreq *tx)
{
	unsigned long flags;
	struct ipath_sge *sge;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
		ipath_dbg("packet size %X > ibmax %X, fail\n",
			tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
		ret = -EMSGSIZE;
		goto fail;
	}

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

retry:
	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
		ret = -EBUSY;
		goto unlock;
	}

	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
		if (ipath_sdma_make_progress(dd))
			goto retry;
		ret = -ENOBUFS;
		goto unlock;
	}

	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
			      tx->map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(addr)) {
		ret = -EIO;
		goto unlock;
	}

	dwoffset = tx->map_len >> 2;
	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);

	/* SDmaFirstDesc */
	sdmadesc[0] |= 1ULL << 12;
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= 1ULL << 14;	/* SDmaUseLargeBuf */

	/* write to the descq */
	tail = dd->ipath_sdma_descq_tail;
	descqp = &dd->ipath_sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
		tx->txreq.start_idx = tail;

	/* increment the tail */
	if (++tail == dd->ipath_sdma_descq_cnt) {
		tail = 0;
		descqp = &dd->ipath_sdma_descq[0].qw[0];
		++dd->ipath_sdma_generation;
	}
	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
				      DMA_TO_DEVICE);
		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= 1ULL << 14;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == dd->ipath_sdma_descq_cnt) {
			tail = 0;
			descqp = &dd->ipath_sdma_descq[0].qw[0];
			++dd->ipath_sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

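	/*
	 * descqp now points one past the last descriptor written; back
	 * it up (handling ring wrap) so the SDmaLastDesc and optional
	 * SDmaIntReq bits land in the final descriptor of the packet.
	 */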
	if (!tail)
		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
	descqp -= 2;
	/* SDmaLastDesc */
	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
		/* SDmaIntReq */
		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
	}

	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);

	tx->txreq.next_descq_idx = tail;
	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
	dd->ipath_sdma_descq_tail = tail;
	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
		vl15_watchdog_enq(dd);

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
fail:
	return ret;
}