/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "ipath_kernel.h"
#include "ipath_user_sdma.h"

/* minimum size of header */
#define IPATH_USER_SDMA_MIN_HEADER_LENGTH	64
/* expected size of headers (for dma_pool) */
#define IPATH_USER_SDMA_EXP_HEADER_LENGTH	64
/* length mask in PBC (lower 11 bits) */
#define IPATH_PBC_LENGTH_MASK			((1 << 11) - 1)

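/*
 * One in-flight user SDMA packet.  Slot 0 of addr[] always holds the
 * 64-byte header; the remaining slots hold pinned user pages of the
 * payload.  A payload needing more slots than addr[] provides is
 * instead copied into a single freshly-allocated page (see
 * ipath_user_sdma_coalesce() below).
 */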
struct ipath_user_sdma_pkt {
	u8 naddr;	/* dimension of addr (1..4) ... */
	u32 counter;	/* sdma pkts queued counter for this entry */
	u64 added;	/* global descq number of entries */

	struct {
		u32 offset;		/* offset for kvaddr, addr */
		u32 length;		/* length in page */
		u8 put_page;		/* should we put_page? */
		u8 dma_mapped;		/* is page dma_mapped? */
		struct page *page;	/* may be NULL (coherent mem) */
		void *kvaddr;		/* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];	/* max pages, any more and we coalesce */

	struct list_head list;	/* list element */
};

struct ipath_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct ipath_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};

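/*
 * Create a per-port user SDMA queue: a slab cache for the packet
 * bookkeeping structs and a dma_pool for the fixed-size (64 byte)
 * headers.  Returns NULL if any allocation fails.
 */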
struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
	struct ipath_user_sdma_queue *pq =
		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct ipath_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}

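/* record one fragment in slot i of the packet's addr[] array */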
static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
				      int i, size_t offset, size_t len,
				      int put_page, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}

static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
					u32 counter, size_t offset,
					size_t len, int dma_mapped,
					struct page *page,
					void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				  kvaddr, dma_addr);
}

/* we have too many pages in the iovec, coalesce to a single page */
static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
				    struct ipath_user_sdma_pkt *pkt,
				    const struct iovec *iov,
				    unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				  dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}

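/*
 * Worked example for the helper below, assuming 4 KiB pages:
 * iov_base = 0x1ffc with iov_len = 8 straddles the page boundary at
 * 0x2000, so the arithmetic yields 2 pages even though the length is
 * only 8 bytes.
 */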
/* how many pages in this iovec element? */
static int ipath_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long) iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/* truncate length to page boundary */
static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

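/*
 * Undo whatever ipath_user_sdma_init_frag() recorded in one slot:
 * DMA-unmap, kunmap and release the page, or, when there is no page,
 * hand the coherent header buffer back to the dma_pool.
 */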
static void ipath_user_sdma_free_pkt_frag(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct ipath_user_sdma_pkt *pkt,
					  int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

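/*
 * Each payload iovec was already checked against PAGE_SIZE by the
 * caller, so it spans at most two pages -- hence pages[2] below.  On
 * a short pin, every page that was pinned is released again before
 * returning -ENOMEM.
 */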
/* return number of pages pinned... */
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
				     struct ipath_user_sdma_pkt *pkt,
				     unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen =
			ipath_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					  pages[j], kmap(pages[j]),
					  dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}

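/*
 * Pin and DMA-map every payload iovec into the packet.  On failure,
 * every fragment recorded in the packet so far is torn down via
 * ipath_user_sdma_free_pkt_frag().
 */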
static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   struct ipath_user_sdma_pkt *pkt,
				   const struct iovec *iov,
				   unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = ipath_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = ipath_user_sdma_pin_pages(dd, pkt,
						addr, iov[idx].iov_len,
						npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}

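/*
 * Payload either fits in the remaining addr[] slots and is pinned
 * page by page, or is copied and coalesced into one page.
 */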
static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
					struct ipath_user_sdma_queue *pq,
					struct ipath_user_sdma_pkt *pkt,
					const struct iovec *iov,
					unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}

/* free every packet on the list, fragments first */
static void ipath_user_sdma_free_pkt_list(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct list_head *list)
{
	struct ipath_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets on list, returning the number of iovec
 * entries consumed.  list must be empty initially, since we clean it
 * out (freeing every queued packet) on error...
 */
static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
				      struct ipath_user_sdma_queue *pq,
				      struct list_head *list,
				      const struct iovec *iov,
				      unsigned long niov,
				      int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct ipath_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * this assignment is a bit strange, because the PBC
		 * counts the number of 32 bit words in the full
		 * packet _except_ the first word of the PBC itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw is the number of 32 bit words the caller has
		 * indicated in the PBC.  note that this is one less
		 * than the total number of words that goes to the
		 * send DMA engine, as the first 32 bit word of the
		 * PBC itself is not counted.  armed with this count,
		 * we can verify that the packet is consistent with
		 * the iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					    page, pbc, dma_addr);

		if (nfrags) {
			ret = ipath_user_sdma_init_payload(dd, pq, pkt,
							   iov + idx_save + 1,
							   nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
						 u32 c)
{
	pq->sent_counter = c;
}

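/*
 * A packet is complete once the hardware's cumulative count of
 * removed descriptors has passed the descq index recorded in
 * pkt->added when it was posted.  Completed packets are moved off
 * pq->sent and freed, and the completion counter is advanced to the
 * counter of the last one freed.
 */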
/* try to clean out queue -- needs pq->lock */
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
				       struct ipath_user_sdma_queue *pq)
{
	struct list_head free_list;
	struct ipath_user_sdma_pkt *pkt;
	struct ipath_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = dd->ipath_sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct ipath_user_sdma_pkt, list);
		counter = pkt->counter;

		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		ipath_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}

void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	ret = ipath_sdma_make_progress(dd);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

/* called at close: drain in-flight packets so that we can clean up successfully */
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
				 struct ipath_user_sdma_queue *pq)
{
	int i;

	if (!pq)
		return;

	for (i = 0; i < 100; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		printk(KERN_INFO "drain: lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

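/*
 * Builders for the two 64-bit words of an SDMA descriptor.  Word 0
 * packs the low 32 address bits, the 2-bit generation, an 11-bit
 * dword count and the buffer offset; bits 11-14 are flags (last,
 * first, dma head, and -- set later in ipath_user_sdma_push_pkts()
 * -- the large-buffer marker).  Word 1 carries address bits 47:32.
 */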
static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
					   u64 addr, u64 dwlen, u64 dwoffset)
{
	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
{
	/* last */	/* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 ipath_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}

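/*
 * Write the descriptor pair for fragment idx of a packet into ring
 * slot 'tail'; ofs is the running dword offset of the fragment
 * within the packet.
 */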
static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
				      struct ipath_user_sdma_pkt *pkt, int idx,
				      unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &dd->ipath_sdma_descq[tail].qw[0];

	descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = ipath_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = ipath_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = ipath_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
				     struct ipath_user_sdma_queue *pq,
				     struct list_head *pktlist)
{
	int ret = 0;
	unsigned long flags;
	u16 tail;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = dd->ipath_sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct ipath_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct ipath_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == dd->ipath_sdma_descq_cnt) {
				tail = 0;
				++dd->ipath_sdma_generation;
			}
		}

		if ((ofs << 2) > dd->ipath_ibmaxlen) {
			ipath_dbg("packet size %X > ibmax %X, fail\n",
				  ofs << 2, dd->ipath_ibmaxlen);
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * if the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs >= IPATH_SMALLBUF_DWORDS) {
			for (i = 0; i < pkt->naddr; i++) {
				dd->ipath_sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == dd->ipath_sdma_descq_cnt)
					dtail = 0;
			}
		}

		dd->ipath_sdma_descq_added += pkt->naddr;
		pkt->added = dd->ipath_sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (dd->ipath_sdma_descq_tail != tail) {
		wmb();
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
		dd->ipath_sdma_descq_tail = tail;
	}

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

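/*
 * Entry point for a userspace writev() on the SDMA interface
 * (presumably wired up through the driver's file_operations in
 * ipath_file_ops.c): batch the iovecs into packets (at most 8 per
 * pass), lazily reclaim completed descriptors, and push the new
 * packets onto the hardware ring.  Returns the number of packets
 * queued, or a negative errno.
 */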
int ipath_user_sdma_writev(struct ipath_devdata *dd,
			   struct ipath_user_sdma_queue *pq,
			   const struct iovec *iov,
			   unsigned long dim)
{
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
	}

	while (dim) {
		const int mxp = 8;

		down_write(&current->mm->mmap_sem);
		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		up_write(&current->mm->mmap_sem);

		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
				ipath_user_sdma_hwqueue_clean(dd);
				ipath_user_sdma_queue_clean(dd, pq);
			}

			ret = ipath_user_sdma_push_pkts(dd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}

int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
				  struct ipath_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	ipath_user_sdma_hwqueue_clean(dd);
	ret = ipath_user_sdma_queue_clean(dd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
{
	return pq->sent_counter;
}

u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
{
	return pq->counter;
}