/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
                             unsigned long, loff_t);
static unsigned int qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);

static const struct file_operations qib_file_ops = {
        .owner = THIS_MODULE,
        .write = qib_write,
        .aio_write = qib_aio_write,
        .open = qib_open,
        .release = qib_close,
        .poll = qib_poll,
        .mmap = qib_mmapf,
        .llseek = noop_llseek,
};

/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
        struct page *page;
        u64 paddr = 0;

        page = vmalloc_to_page(p);
        if (page)
                paddr = page_to_pfn(page) << PAGE_SHIFT;

        return paddr;
}
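
/*
 * Note: the pseudo-physical values produced by cvt_kvaddr() round-trip
 * through user space.  They are handed out in qib_base_info (e.g.
 * spi_subctxt_uregbase), come back as the mmap() file offset, and are
 * matched again in mmap_kvaddr() below, which recomputes cvt_kvaddr()
 * on the same kernel buffers and compares.
 */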

static int qib_get_base_info(struct file *fp, void __user *ubase,
                             size_t ubase_size)
{
        struct qib_ctxtdata *rcd = ctxt_fp(fp);
        int ret = 0;
        struct qib_base_info *kinfo = NULL;
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        unsigned subctxt_cnt;
        int shared, master;
        size_t sz;

        subctxt_cnt = rcd->subctxt_cnt;
        if (!subctxt_cnt) {
                shared = 0;
                master = 0;
                subctxt_cnt = 1;
        } else {
                shared = 1;
                master = !subctxt_fp(fp);
        }

        sz = sizeof(*kinfo);
        /* If context sharing is not requested, allow the old size structure */
        if (!shared)
                sz -= 7 * sizeof(u64);
        if (ubase_size < sz) {
                ret = -EINVAL;
                goto bail;
        }

        kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
        if (kinfo == NULL) {
                ret = -ENOMEM;
                goto bail;
        }

        ret = dd->f_get_base_info(rcd, kinfo);
        if (ret < 0)
                goto bail;

        kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
        kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
        kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
        kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
        /*
         * have to mmap whole thing
         */
        kinfo->spi_rcv_egrbuftotlen =
                rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
        kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
        kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
                rcd->rcvegrbuf_chunks;
        kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
        if (master)
                kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
        /*
         * for this use, may be cfgctxts summed over all chips that
         * are configured and present
         */
        kinfo->spi_nctxts = dd->cfgctxts;
        /* unit (chip/board) our context is on */
        kinfo->spi_unit = dd->unit;
        kinfo->spi_port = ppd->port;
        /* for now, only a single page */
        kinfo->spi_tid_maxsize = PAGE_SIZE;

        /*
         * Doing this per context, and based on the skip value, etc.  This has
         * to be the actual buffer size, since the protocol code treats it
         * as an array.
         *
         * These have to be set to user addresses in the user code via mmap.
         * These values are used on return to user code for the mmap target
         * addresses only.  For 32 bit, same 44 bit address problem, so use
         * the physical address, not virtual.  Before 2.6.11, using the
         * page_address() macro worked, but in 2.6.11, even that returns the
         * full 64 bit address (upper bits all 1's).  So far, using the
         * physical addresses (or chip offsets, for chip mapping) works, but
         * no doubt some future kernel release will change that, and we'll be
         * on to yet another method of dealing with this.
         * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
         * since the chips with non-zero rhf_offset don't normally
         * enable tail register updates to host memory, but for testing,
         * both can be enabled and used.
         */
        kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
        kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
        kinfo->spi_rhf_offset = dd->rhf_offset;
        kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
        kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
        /* setup per-unit (not port) status area for user programs */
        kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
                (char *) ppd->statusp -
                (char *) dd->pioavailregs_dma;
        kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
        if (!shared) {
                kinfo->spi_piocnt = rcd->piocnt;
                kinfo->spi_piobufbase = (u64) rcd->piobufs;
                kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
        } else if (master) {
                kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
                        (rcd->piocnt % subctxt_cnt);
                /* Master's PIO buffers are after all the slaves' */
                kinfo->spi_piobufbase = (u64) rcd->piobufs +
                        dd->palign *
                        (rcd->piocnt - kinfo->spi_piocnt);
        } else {
                unsigned slave = subctxt_fp(fp) - 1;

                kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
                kinfo->spi_piobufbase = (u64) rcd->piobufs +
                        dd->palign * kinfo->spi_piocnt * slave;
        }
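
        /*
         * Worked example of the split above: with rcd->piocnt == 10 and
         * subctxt_cnt == 4, each slave gets 10 / 4 == 2 buffers and the
         * master gets 2 + (10 % 4) == 4, i.e. the quotient plus the
         * remainder, with the master's buffers placed after all the
         * slaves'.
         */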

        if (shared) {
                kinfo->spi_sendbuf_status =
                        cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
                /* only spi_subctxt_* fields should be set in this block! */
                kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);

                kinfo->spi_subctxt_rcvegrbuf =
                        cvt_kvaddr(rcd->subctxt_rcvegrbuf);
                kinfo->spi_subctxt_rcvhdr_base =
                        cvt_kvaddr(rcd->subctxt_rcvhdr_base);
        }

        /*
         * All user buffers are 2KB buffers.  If we ever support
         * giving 4KB buffers to user processes, this will need some
         * work.  Can't use piobufbase directly, because it has
         * both 2K and 4K buffer base values.
         */
        kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
                dd->palign;
        kinfo->spi_pioalign = dd->palign;
        kinfo->spi_qpair = QIB_KD_QP;
        /*
         * user mode PIO buffers are always 2KB, even when 4KB can
         * be received, and sent via the kernel; this is ibmaxlen
         * for 2K MTU.
         */
        kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
        kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
        kinfo->spi_ctxt = rcd->ctxt;
        kinfo->spi_subctxt = subctxt_fp(fp);
        kinfo->spi_sw_version = QIB_KERN_SWVERSION;
        kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
        kinfo->spi_hw_version = dd->revision;

        if (master)
                kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;

        sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
        if (copy_to_user(ubase, kinfo, sz))
                ret = -EFAULT;
bail:
        kfree(kinfo);
        return ret;
}

/**
 * qib_tid_update - update a context TID
 * @rcd: the context
 * @fp: the qib device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To reduce search time, we
 * keep a cursor for each context, walking the shadow tid array to find
 * one that's not in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
                          const struct qib_tid_info *ti)
{
        int ret = 0, ntids;
        u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
        u16 *tidlist;
        struct qib_devdata *dd = rcd->dd;
        u64 physaddr;
        unsigned long vaddr;
        u64 __iomem *tidbase;
        unsigned long tidmap[8];
        struct page **pagep = NULL;
        unsigned subctxt = subctxt_fp(fp);

        if (!dd->pageshadow) {
                ret = -ENOMEM;
                goto done;
        }

        cnt = ti->tidcnt;
        if (!cnt) {
                ret = -EFAULT;
                goto done;
        }
        ctxttid = rcd->ctxt * dd->rcvtidcnt;
        if (!rcd->subctxt_cnt) {
                tidcnt = dd->rcvtidcnt;
                tid = rcd->tidcursor;
                tidoff = 0;
        } else if (!subctxt) {
                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
                         (dd->rcvtidcnt % rcd->subctxt_cnt);
                tidoff = dd->rcvtidcnt - tidcnt;
                ctxttid += tidoff;
                tid = tidcursor_fp(fp);
        } else {
                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
                tidoff = tidcnt * (subctxt - 1);
                ctxttid += tidoff;
                tid = tidcursor_fp(fp);
        }
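        /*
         * The TID space is carved up like the PIO buffers above: each
         * slave subcontext gets rcvtidcnt / subctxt_cnt TIDs at offset
         * tidcnt * (subctxt - 1), and the master keeps the quotient plus
         * the remainder, placed at the end of the context's TID range.
         */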
        if (cnt > tidcnt) {
                /* make sure it all fits in tid_pg_list */
                qib_devinfo(dd->pcidev, "Process tried to allocate %u "
                            "TIDs, only trying max (%u)\n", cnt, tidcnt);
                cnt = tidcnt;
        }
        pagep = (struct page **) rcd->tid_pg_list;
        tidlist = (u16 *) &pagep[dd->rcvtidcnt];
        pagep += tidoff;
        tidlist += tidoff;

        memset(tidmap, 0, sizeof(tidmap));
        /* before decrement; chip actual # */
        ntids = tidcnt;
        tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
                                   dd->rcvtidbase +
                                   ctxttid * sizeof(*tidbase));

        /* virtual address of first page in transfer */
        vaddr = ti->tidvaddr;
        if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
                       cnt * PAGE_SIZE)) {
                ret = -EFAULT;
                goto done;
        }
        ret = qib_get_user_pages(vaddr, cnt, pagep);
        if (ret) {
                /*
                 * if (ret == -EBUSY)
                 * We can't continue because the pagep array won't be
                 * initialized.  This should never happen,
                 * unless perhaps the user has mpin'ed the pages
                 * themselves.
                 */
                qib_devinfo(dd->pcidev,
                            "Failed to lock addr %p, %u pages: "
                            "errno %d\n", (void *) vaddr, cnt, -ret);
                goto done;
        }
        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
                for (; ntids--; tid++) {
                        if (tid == tidcnt)
                                tid = 0;
                        if (!dd->pageshadow[ctxttid + tid])
                                break;
                }
                if (ntids < 0) {
                        /*
                         * Oops, wrapped all the way through their TIDs,
                         * and didn't have enough free; see comments at
                         * start of routine
                         */
                        i--;    /* last tidlist[i] not filled in */
                        ret = -ENOMEM;
                        break;
                }
                tidlist[i] = tid + tidoff;
                /* we "know" system pages and TID pages are same size */
                dd->pageshadow[ctxttid + tid] = pagep[i];
                dd->physshadow[ctxttid + tid] =
                        qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE);
                /*
                 * don't need atomic or it's overhead
                 */
                __set_bit(tid, tidmap);
                physaddr = dd->physshadow[ctxttid + tid];
                /* PERFORMANCE: below should almost certainly be cached */
                dd->f_put_tid(dd, &tidbase[tid],
                              RCVHQ_RCV_TYPE_EXPECTED, physaddr);
                /*
                 * don't check this tid in qib_ctxtshadow, since we
                 * just filled it in; start with the next one.
                 */
                tid++;
        }

        if (ret) {
                u32 limit;
cleanup:
                /* jump here if copy out of updated info failed... */
                /* same code that's in qib_free_tid() */
                limit = sizeof(tidmap) * BITS_PER_BYTE;
                if (limit > tidcnt)
                        /* just in case size changes in future */
                        limit = tidcnt;
                tid = find_first_bit((const unsigned long *)tidmap, limit);
                for (; tid < limit; tid++) {
                        if (!test_bit(tid, tidmap))
                                continue;
                        if (dd->pageshadow[ctxttid + tid]) {
                                dma_addr_t phys;

                                phys = dd->physshadow[ctxttid + tid];
                                dd->physshadow[ctxttid + tid] = dd->tidinvalid;
                                /* PERFORMANCE: below should almost certainly
                                 * be cached
                                 */
                                dd->f_put_tid(dd, &tidbase[tid],
                                              RCVHQ_RCV_TYPE_EXPECTED,
                                              dd->tidinvalid);
                                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                                dd->pageshadow[ctxttid + tid] = NULL;
                        }
                }
                qib_release_user_pages(pagep, cnt);
        } else {
                /*
                 * Copy the updated array, with qib_tid's filled in, back
                 * to user.  Since we did the copy in already, this "should
                 * never fail".  If it does, we have to clean up...
                 */
                if (copy_to_user((void __user *)
                                 (unsigned long) ti->tidlist,
                                 tidlist, cnt * sizeof(*tidlist))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
                                 tidmap, sizeof tidmap)) {
                        ret = -EFAULT;
                        goto cleanup;
                }
                if (tid == tidcnt)
                        tid = 0;
                if (!rcd->subctxt_cnt)
                        rcd->tidcursor = tid;
                else
                        tidcursor_fp(fp) = tid;
        }

done:
        return ret;
}

/**
 * qib_tid_free - free a context TID
 * @rcd: the context
 * @subctxt: the subcontext
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this context
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
                        const struct qib_tid_info *ti)
{
        int ret = 0;
        u32 tid, ctxttid, cnt, limit, tidcnt;
        struct qib_devdata *dd = rcd->dd;
        u64 __iomem *tidbase;
        unsigned long tidmap[8];

        if (!dd->pageshadow) {
                ret = -ENOMEM;
                goto done;
        }

        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
                           sizeof tidmap)) {
                ret = -EFAULT;
                goto done;
        }
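
        /*
         * tidmap is a bitmask of the TIDs to free: 8 unsigned longs,
         * i.e. 512 bits on 64-bit kernels, which is why "limit" below
         * is clamped to tidcnt in case the map ever covers more bits
         * than the context actually has TIDs.
         */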

        ctxttid = rcd->ctxt * dd->rcvtidcnt;
        if (!rcd->subctxt_cnt)
                tidcnt = dd->rcvtidcnt;
        else if (!subctxt) {
                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
                         (dd->rcvtidcnt % rcd->subctxt_cnt);
                ctxttid += dd->rcvtidcnt - tidcnt;
        } else {
                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
                ctxttid += tidcnt * (subctxt - 1);
        }
        tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
                                   dd->rcvtidbase +
                                   ctxttid * sizeof(*tidbase));

        limit = sizeof(tidmap) * BITS_PER_BYTE;
        if (limit > tidcnt)
                /* just in case size changes in future */
                limit = tidcnt;
        tid = find_first_bit(tidmap, limit);
        for (cnt = 0; tid < limit; tid++) {
                /*
                 * small optimization; if we detect a run of 3 or so without
                 * any set, use find_first_bit again.  That's mainly to
                 * accelerate the case where we wrapped, so we have some at
                 * the beginning, and some at the end, and a big gap
                 * in the middle.
                 */
                if (!test_bit(tid, tidmap))
                        continue;
                cnt++;
                if (dd->pageshadow[ctxttid + tid]) {
                        struct page *p;
                        dma_addr_t phys;

                        p = dd->pageshadow[ctxttid + tid];
                        dd->pageshadow[ctxttid + tid] = NULL;
                        phys = dd->physshadow[ctxttid + tid];
                        dd->physshadow[ctxttid + tid] = dd->tidinvalid;
                        /* PERFORMANCE: below should almost certainly be
                         * cached
                         */
                        dd->f_put_tid(dd, &tidbase[tid],
                                      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
                        pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                        qib_release_user_pages(&p, 1);
                }
        }
done:
        return ret;
}

/**
 * qib_set_part_key - set a partition key
 * @rcd: the context
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple contexts may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single qlogic_ib register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time; we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
        struct qib_pportdata *ppd = rcd->ppd;
        int i, any = 0, pidx = -1;
        u16 lkey = key & 0x7FFF;
        int ret;

        if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
                /* nothing to do; this key always valid */
                ret = 0;
                goto bail;
        }

        if (!lkey) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Set the full membership bit, because it has to be
         * set in the register or the packet, and it seems
         * cleaner to set in the register than to force all
         * callers to set it.
         */
        key |= 0x8000;
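
        /*
         * PKey layout reminder: the low 15 bits (masked by 0x7FFF above)
         * are the key value proper, and bit 15 (0x8000) is the membership
         * bit -- set for full membership, clear for limited.  Two keys
         * differing only in bit 15 are therefore the "same" key, which is
         * what the lkey comparisons below are checking for.
         */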

        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
                if (!rcd->pkeys[i] && pidx == -1)
                        pidx = i;
                if (rcd->pkeys[i] == key) {
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (pidx == -1) {
                ret = -EBUSY;
                goto bail;
        }
        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i]) {
                        any++;
                        continue;
                }
                if (ppd->pkeys[i] == key) {
                        atomic_t *pkrefs = &ppd->pkeyrefs[i];

                        if (atomic_inc_return(pkrefs) > 1) {
                                rcd->pkeys[pidx] = key;
                                ret = 0;
                                goto bail;
                        } else {
                                /*
                                 * lost race, decrement count, catch below
                                 */
                                atomic_dec(pkrefs);
                                any++;
                        }
                }
                if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
                        /*
                         * It makes no sense to have both the limited and
                         * full membership PKEY set at the same time since
                         * the unlimited one will disable the limited one.
                         */
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (!any) {
                ret = -EBUSY;
                goto bail;
        }
        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i] &&
                    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
                        rcd->pkeys[pidx] = key;
                        ppd->pkeys[i] = key;
                        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
                        ret = 0;
                        goto bail;
                }
        }
        ret = -EBUSY;

bail:
        return ret;
}

/**
 * qib_manage_rcvq - manage a context's receive queue
 * @rcd: the context
 * @subctxt: the subcontext
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
                           int start_stop)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned int rcvctrl_op;

        if (subctxt)
                goto bail;
        /* atomically clear receive enable ctxt. */
        if (start_stop) {
                /*
                 * On enable, force in-memory copy of the tail register to
                 * 0, so that protocol code doesn't have to worry about
                 * whether or not the chip has yet updated the in-memory
                 * copy or not on return from the system call.  The chip
                 * always resets its tail register back to 0 on a
                 * transition from disabled to enabled.
                 */
                if (rcd->rcvhdrtail_kvaddr)
                        qib_clear_rcvhdrtail(rcd);
                rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
        } else
                rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
        dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
        /* always; new head should be equal to new tail; see above */
bail:
        return 0;
}

static void qib_clean_part_key(struct qib_ctxtdata *rcd,
                               struct qib_devdata *dd)
{
        int i, j, pchanged = 0;
        u64 oldpkey;
        struct qib_pportdata *ppd = rcd->ppd;

        /* for debugging only */
        oldpkey = (u64) ppd->pkeys[0] |
                ((u64) ppd->pkeys[1] << 16) |
                ((u64) ppd->pkeys[2] << 32) |
                ((u64) ppd->pkeys[3] << 48);

        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
                if (!rcd->pkeys[i])
                        continue;
                for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
                        /* check for match independent of the global bit */
                        if ((ppd->pkeys[j] & 0x7fff) !=
                            (rcd->pkeys[i] & 0x7fff))
                                continue;
                        if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
                                ppd->pkeys[j] = 0;
                                pchanged++;
                        }
                        break;
                }
                rcd->pkeys[i] = 0;
        }
        if (pchanged)
                (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
}

/* common code for the mappings on dma_alloc_coherent mem */
static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
                        unsigned len, void *kvaddr, u32 write_ok, char *what)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned long pfn;
        int ret;

        if ((vma->vm_end - vma->vm_start) > len) {
                qib_devinfo(dd->pcidev,
                            "FAIL on %s: len %lx > %x\n", what,
                            vma->vm_end - vma->vm_start, len);
                ret = -EFAULT;
                goto bail;
        }

        /*
         * shared context user code requires rcvhdrq mapped r/w; others
         * are allowed only a readonly mapping.
         */
        if (!write_ok) {
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
                                    "%s must be mapped readonly\n", what);
                        ret = -EPERM;
                        goto bail;
                }

                /* don't allow them to later change with mprotect */
                vma->vm_flags &= ~VM_MAYWRITE;
        }

        pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
        ret = remap_pfn_range(vma, vma->vm_start, pfn,
                              len, vma->vm_page_prot);
        if (ret)
                qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
                            "bytes failed: %d\n", what, rcd->ctxt,
                            pfn, len, ret);
bail:
        return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
                     u64 ureg)
{
        unsigned long phys;
        unsigned long sz;
        int ret;

        /*
         * This is real hardware, so use io_remap.  This is the mechanism
         * for the user process to update the head registers for their ctxt
         * in the chip.
         */
        sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
        if ((vma->vm_end - vma->vm_start) > sz) {
                qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
                            "%lx > PAGE\n", vma->vm_end - vma->vm_start);
                ret = -EFAULT;
        } else {
                phys = dd->physaddr + ureg;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         phys >> PAGE_SHIFT,
                                         vma->vm_end - vma->vm_start,
                                         vma->vm_page_prot);
        }
        return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
                        struct qib_devdata *dd,
                        struct qib_ctxtdata *rcd,
                        unsigned piobufs, unsigned piocnt)
{
        unsigned long phys;
        int ret;

        /*
         * When we map the PIO buffers in the chip, we want to map them as
         * writeonly, no read possible; unfortunately, x86 doesn't allow
         * for this in hardware, but we still prevent users from asking
         * for it.
         */
        if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
                qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
                            "reqlen %lx > PAGE\n",
                            vma->vm_end - vma->vm_start);
                ret = -EINVAL;
                goto bail;
        }

        phys = dd->physaddr + piobufs;

#if defined(__powerpc__)
        /* There isn't a generic way to specify writethrough mappings */
        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
        pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
        pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

        /*
         * don't allow them to later change to readable with mprotect (for when
         * not initially mapped readable, as is normally the case)
         */
        vma->vm_flags &= ~VM_MAYREAD;
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

        if (qib_wc_pat)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
                                 vma->vm_end - vma->vm_start,
                                 vma->vm_page_prot);
bail:
        return ret;
}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
                           struct qib_ctxtdata *rcd)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned long start, size;
        size_t total_size, i;
        unsigned long pfn;
        int ret;

        size = rcd->rcvegrbuf_size;
        total_size = rcd->rcvegrbuf_chunks * size;
        if ((vma->vm_end - vma->vm_start) > total_size) {
                qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
                            "reqlen %lx > actual %lx\n",
                            vma->vm_end - vma->vm_start,
                            (unsigned long) total_size);
                ret = -EINVAL;
                goto bail;
        }

        if (vma->vm_flags & VM_WRITE) {
                qib_devinfo(dd->pcidev, "Can't map eager buffers as "
                            "writable (flags=%lx)\n", vma->vm_flags);
                ret = -EPERM;
                goto bail;
        }
        /* don't allow them to later change to writeable with mprotect */
        vma->vm_flags &= ~VM_MAYWRITE;

        start = vma->vm_start;

        for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
                pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
                ret = remap_pfn_range(vma, start, pfn, size,
                                      vma->vm_page_prot);
                if (ret < 0)
                        goto bail;
        }
        ret = 0;

bail:
        return ret;
}

/*
 * qib_file_vma_fault - handle a VMA page fault.
 */
static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page;

        page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);
        vmf->page = page;

        return 0;
}

static struct vm_operations_struct qib_file_vm_ops = {
        .fault = qib_file_vma_fault,
};

static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
                       struct qib_ctxtdata *rcd, unsigned subctxt)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned subctxt_cnt;
        unsigned long len;
        void *addr;
        size_t size;
        int ret = 0;

        subctxt_cnt = rcd->subctxt_cnt;
        size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;

        /*
         * Each process has all the subctxt uregbase, rcvhdrq, and
         * rcvegrbufs mmapped - as an array for all the processes,
         * and also separately for this process.
         */
        if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
                addr = rcd->subctxt_uregbase;
                size = PAGE_SIZE * subctxt_cnt;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
                addr = rcd->subctxt_rcvhdr_base;
                size = rcd->rcvhdrq_size * subctxt_cnt;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
                addr = rcd->subctxt_rcvegrbuf;
                size *= subctxt_cnt;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
                                        PAGE_SIZE * subctxt)) {
                addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
                size = PAGE_SIZE;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
                                        rcd->rcvhdrq_size * subctxt)) {
                addr = rcd->subctxt_rcvhdr_base +
                        rcd->rcvhdrq_size * subctxt;
                size = rcd->rcvhdrq_size;
        } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
                addr = rcd->user_event_mask;
                size = PAGE_SIZE;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
                                        size * subctxt)) {
                addr = rcd->subctxt_rcvegrbuf + size * subctxt;
                /* rcvegrbufs are read-only on the slave */
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
                                    "Can't map eager buffers as "
                                    "writable (flags=%lx)\n", vma->vm_flags);
                        ret = -EPERM;
                        goto bail;
                }
                /*
                 * Don't allow permission to later change to writeable
                 * with mprotect.
                 */
                vma->vm_flags &= ~VM_MAYWRITE;
        } else
                goto bail;
        len = vma->vm_end - vma->vm_start;
        if (len > size) {
                ret = -EINVAL;
                goto bail;
        }

        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
        vma->vm_ops = &qib_file_vm_ops;
        vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
        ret = 1;

bail:
        return ret;
}

/**
 * qib_mmapf - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
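/*
 * A rough sketch of the user-side counterpart (illustrative only, not
 * part of this driver): after the user-init command, serviced by
 * qib_do_user_init()/qib_get_base_info() below, returns the
 * qib_base_info, the library maps each region by passing the spi_*
 * value back as the mmap offset, e.g.
 *
 *      hdrq = mmap(NULL, hdrq_len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                  fd, (off_t) kinfo->spi_rcvhdr_base);
 *
 * and qib_mmapf() dispatches on that offset.
 */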
static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
{
        struct qib_ctxtdata *rcd;
        struct qib_devdata *dd;
        u64 pgaddr, ureg;
        unsigned piobufs, piocnt;
        int ret, match = 1;

        rcd = ctxt_fp(fp);
        if (!rcd || !(vma->vm_flags & VM_SHARED)) {
                ret = -EINVAL;
                goto bail;
        }
        dd = rcd->dd;

        /*
         * This is the qib_do_user_init() code, mapping the shared buffers
         * and per-context user registers into the user process.  The address
         * referred to by vm_pgoff is the file offset passed via mmap().
         * For shared contexts, this is the kernel vmalloc() address of the
         * pages to share with the master.
         * For non-shared or master ctxts, this is a physical address.
         * We only do one mmap for each space mapped.
         */
        pgaddr = vma->vm_pgoff << PAGE_SHIFT;

        /*
         * Check for 0 in case one of the allocations failed, but user
         * called mmap anyway.
         */
        if (!pgaddr) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Physical addresses must fit in 40 bits for our hardware.
         * Check for kernel virtual addresses first, anything else must
         * match a HW or memory address.
         */
        ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
        if (ret) {
                if (ret > 0)
                        ret = 0;
                goto bail;
        }

        ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
        if (!rcd->subctxt_cnt) {
                /* ctxt is not shared */
                piocnt = rcd->piocnt;
                piobufs = rcd->piobufs;
        } else if (!subctxt_fp(fp)) {
                /* caller is the master */
                piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
                         (rcd->piocnt % rcd->subctxt_cnt);
                piobufs = rcd->piobufs +
                        dd->palign * (rcd->piocnt - piocnt);
        } else {
                unsigned slave = subctxt_fp(fp) - 1;

                /* caller is a slave */
                piocnt = rcd->piocnt / rcd->subctxt_cnt;
                piobufs = rcd->piobufs + dd->palign * piocnt * slave;
        }

        if (pgaddr == ureg)
                ret = mmap_ureg(vma, dd, ureg);
        else if (pgaddr == piobufs)
                ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
        else if (pgaddr == dd->pioavailregs_phys)
                /* in-memory copy of pioavail registers */
                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
                                   (void *) dd->pioavailregs_dma, 0,
                                   "pioavail registers");
        else if (pgaddr == rcd->rcvegr_phys)
                ret = mmap_rcvegrbufs(vma, rcd);
        else if (pgaddr == (u64) rcd->rcvhdrq_phys)
                /*
                 * The rcvhdrq itself; multiple pages, contiguous
                 * from an i/o perspective.  Shared contexts need
                 * to map r/w, so we allow writing.
                 */
                ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
                                   rcd->rcvhdrq, 1, "rcvhdrq");
        else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
                /* in-memory copy of rcvhdrq tail register */
                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
                                   rcd->rcvhdrtail_kvaddr, 0,
                                   "rcvhdrq tail");
        else
                match = 0;
        if (!match)
                ret = -EINVAL;

        vma->vm_private_data = NULL;

        if (ret < 0)
                qib_devinfo(dd->pcidev,
                            "mmap Failure %d: off %llx len %lx\n",
                            -ret, (unsigned long long)pgaddr,
                            vma->vm_end - vma->vm_start);
bail:
        return ret;
}

static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
                                    struct file *fp,
                                    struct poll_table_struct *pt)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned pollflag;

        poll_wait(fp, &rcd->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (rcd->urgent != rcd->urgent_poll) {
                pollflag = POLLIN | POLLRDNORM;
                rcd->urgent_poll = rcd->urgent;
        } else {
                pollflag = 0;
                set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
        }
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}
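
/*
 * The urgent/urgent_poll pair above works like a sequence counter:
 * rcd->urgent is advanced elsewhere in the driver as urgent packets
 * arrive, and poll reports readable only when it sees a value it has
 * not reported yet, recording it in rcd->urgent_poll so the same
 * event is not reported twice.
 */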

static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
                                  struct file *fp,
                                  struct poll_table_struct *pt)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned pollflag;

        poll_wait(fp, &rcd->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (dd->f_hdrqempty(rcd)) {
                set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
                dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
                pollflag = 0;
        } else
                pollflag = POLLIN | POLLRDNORM;
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}

static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
{
        struct qib_ctxtdata *rcd;
        unsigned pollflag;

        rcd = ctxt_fp(fp);
        if (!rcd)
                pollflag = POLLERR;
        else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
                pollflag = qib_poll_urgent(rcd, fp, pt);
        else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
                pollflag = qib_poll_next(rcd, fp, pt);
        else /* invalid */
                pollflag = POLLERR;

        return pollflag;
}

/*
 * Check that userland and driver are compatible for subcontexts.
 */
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
{
        /* this code is written long-hand for clarity */
        if (QIB_USER_SWMAJOR != user_swmajor) {
                /* no promise of compatibility if major mismatch */
                return 0;
        }
        if (QIB_USER_SWMAJOR == 1) {
                switch (QIB_USER_SWMINOR) {
                case 0:
                case 1:
                case 2:
                        /* no subctxt implementation so cannot be compatible */
                        return 0;
                case 3:
                        /* 3 is only compatible with itself */
                        return user_swminor == 3;
                default:
                        /* >= 4 are compatible (or are expected to be) */
                        return user_swminor >= 4;
                }
        }
        /* make no promises yet for future major versions */
        return 0;
}
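
/*
 * For example, a 1.3 driver paired with a 1.3 library is compatible,
 * and a 1.4 driver with a 1.5 library is compatible (both >= 4), but a
 * 1.4 driver with a 1.3 library is not: the switch above is on the
 * driver's minor (QIB_USER_SWMINOR), so the 1.3 library lands in the
 * "user_swminor >= 4" arm and fails it.
 */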

static int init_subctxts(struct qib_devdata *dd,
                         struct qib_ctxtdata *rcd,
                         const struct qib_user_info *uinfo)
{
        int ret = 0;
        unsigned num_subctxts;
        size_t size;

        /*
         * If the user is requesting zero subctxts,
         * skip the subctxt allocation.
         */
        if (uinfo->spu_subctxt_cnt <= 0)
                goto bail;
        num_subctxts = uinfo->spu_subctxt_cnt;

        /* Check for subctxt compatibility */
        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
                                     uinfo->spu_userversion & 0xffff)) {
                qib_devinfo(dd->pcidev,
                            "Mismatched user version (%d.%d) and driver "
                            "version (%d.%d) while context sharing. Ensure "
                            "that driver and library are from the same "
                            "release.\n",
                            (int) (uinfo->spu_userversion >> 16),
                            (int) (uinfo->spu_userversion & 0xffff),
                            QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
                goto bail;
        }
        if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
                ret = -EINVAL;
                goto bail;
        }

        rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
        if (!rcd->subctxt_uregbase) {
                ret = -ENOMEM;
                goto bail;
        }
        /* Note: rcd->rcvhdrq_size isn't initialized yet. */
        size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
                     sizeof(u32), PAGE_SIZE) * num_subctxts;
        rcd->subctxt_rcvhdr_base = vmalloc_user(size);
        if (!rcd->subctxt_rcvhdr_base) {
                ret = -ENOMEM;
                goto bail_ureg;
        }

        rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
                                              rcd->rcvegrbuf_size *
                                              num_subctxts);
        if (!rcd->subctxt_rcvegrbuf) {
                ret = -ENOMEM;
                goto bail_rhdr;
        }
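
        /*
         * vmalloc_user() is used for all three buffers above because
         * they are shared with user space through mmap: the pages come
         * back zeroed, and the fault handler (qib_file_vma_fault) can
         * map them to user space one page at a time via
         * vmalloc_to_page(), matched by offset through cvt_kvaddr().
         */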

        rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
        rcd->subctxt_id = uinfo->spu_subctxt_id;
        rcd->active_slaves = 1;
        rcd->redirect_seq_cnt = 1;
        set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
        goto bail;

bail_rhdr:
        vfree(rcd->subctxt_rcvhdr_base);
bail_ureg:
        vfree(rcd->subctxt_uregbase);
        rcd->subctxt_uregbase = NULL;
bail:
        return ret;
}

static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
                      struct file *fp, const struct qib_user_info *uinfo)
{
        struct qib_devdata *dd = ppd->dd;
        struct qib_ctxtdata *rcd;
        void *ptmp = NULL;
        int ret;

        rcd = qib_create_ctxtdata(ppd, ctxt);

        /*
         * Allocate memory for use in qib_tid_update() at open to
         * reduce cost of expected send setup per message segment
         */
        if (rcd)
                ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
                               dd->rcvtidcnt * sizeof(struct page **),
                               GFP_KERNEL);

        if (!rcd || !ptmp) {
                qib_dev_err(dd, "Unable to allocate ctxtdata "
                            "memory, failing open\n");
                ret = -ENOMEM;
                goto bailerr;
        }
        rcd->userversion = uinfo->spu_userversion;
        ret = init_subctxts(dd, rcd, uinfo);
        if (ret)
                goto bailerr;
        rcd->tid_pg_list = ptmp;
        rcd->pid = current->pid;
        init_waitqueue_head(&dd->rcd[ctxt]->wait);
        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
        ctxt_fp(fp) = rcd;
        qib_stats.sps_ctxts++;
        ret = 0;
        goto bail;

bailerr:
        dd->rcd[ctxt] = NULL;
        kfree(rcd);
        kfree(ptmp);
bail:
        return ret;
}

static inline int usable(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;

        return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
                (ppd->lflags & QIBL_LINKACTIVE);
}

/*
 * Select a context on the given device, either using a requested port
 * or the port based on the context number.
 */
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
                            const struct qib_user_info *uinfo)
{
        struct qib_pportdata *ppd = NULL;
        int ret, ctxt;

        if (port) {
                if (!usable(dd->pport + port - 1)) {
                        ret = -ENETDOWN;
                        goto done;
                } else
                        ppd = dd->pport + port - 1;
        }
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
             ctxt++)
                ;
        if (ctxt == dd->cfgctxts) {
                ret = -EBUSY;
                goto done;
        }
        if (!ppd) {
                u32 pidx = ctxt % dd->num_pports;
                if (usable(dd->pport + pidx))
                        ppd = dd->pport + pidx;
                else {
                        for (pidx = 0; pidx < dd->num_pports && !ppd;
                             pidx++)
                                if (usable(dd->pport + pidx))
                                        ppd = dd->pport + pidx;
                }
        }
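        /*
         * Port choice when none was requested: try the port implied by
         * round-robin on the context number (ctxt % num_pports) first,
         * then fall back to the first usable port on the device; if no
         * port is usable, fail with -ENETDOWN below.
         */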
        ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
done:
        return ret;
}

static int find_free_ctxt(int unit, struct file *fp,
                          const struct qib_user_info *uinfo)
{
        struct qib_devdata *dd = qib_lookup(unit);
        int ret;

        if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
                ret = -ENODEV;
        else
                ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);

        return ret;
}

static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
                      unsigned alg)
{
        struct qib_devdata *udd = NULL;
        int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
        u32 port = uinfo->spu_port, ctxt;

        devmax = qib_count_units(&npresent, &nup);
        if (!npresent) {
                ret = -ENXIO;
                goto done;
        }
        if (nup == 0) {
                ret = -ENETDOWN;
                goto done;
        }
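
        /*
         * Two placement policies: QIB_PORT_ALG_ACROSS scans all units
         * and picks the one with usable ports and the fewest contexts
         * already in use, spreading load across devices; otherwise we
         * simply take the first device that can provide a context.
         */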

        if (alg == QIB_PORT_ALG_ACROSS) {
                unsigned inuse = ~0U;
                /* find device (with ACTIVE ports) with fewest ctxts in use */
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        unsigned cused = 0, cfree = 0, pusable = 0;
                        if (!dd)
                                continue;
                        if (port && port <= dd->num_pports &&
                            usable(dd->pport + port - 1))
                                pusable = 1;
                        else
                                for (i = 0; i < dd->num_pports; i++)
                                        if (usable(dd->pport + i))
                                                pusable++;
                        if (!pusable)
                                continue;
                        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
                             ctxt++)
                                if (dd->rcd[ctxt])
                                        cused++;
                                else
                                        cfree++;
                        if (pusable && cfree && cused < inuse) {
                                udd = dd;
                                inuse = cused;
                        }
                }
                if (udd) {
                        ret = choose_port_ctxt(fp, udd, port, uinfo);
                        goto done;
                }
        } else {
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        if (dd) {
                                ret = choose_port_ctxt(fp, dd, port, uinfo);
                                if (!ret)
                                        goto done;
                                if (ret == -EBUSY)
                                        dusable++;
                        }
                }
        }
        ret = dusable ? -EBUSY : -ENETDOWN;

done:
        return ret;
}

static int find_shared_ctxt(struct file *fp,
                            const struct qib_user_info *uinfo)
{
        int devmax, ndev, i;
        int ret = 0;

        devmax = qib_count_units(NULL, NULL);

        for (ndev = 0; ndev < devmax; ndev++) {
                struct qib_devdata *dd = qib_lookup(ndev);

                /* device portion of usable() */
                if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
                        continue;
                for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
                        struct qib_ctxtdata *rcd = dd->rcd[i];

                        /* Skip ctxts which are not yet open */
                        if (!rcd || !rcd->cnt)
                                continue;
                        /* Skip ctxt if it doesn't match the requested one */
                        if (rcd->subctxt_id != uinfo->spu_subctxt_id)
                                continue;
                        /* Verify the sharing process matches the master */
                        if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
                            rcd->userversion != uinfo->spu_userversion ||
                            rcd->cnt >= rcd->subctxt_cnt) {
                                ret = -EINVAL;
                                goto done;
                        }
                        ctxt_fp(fp) = rcd;
                        subctxt_fp(fp) = rcd->cnt++;
                        rcd->subpid[subctxt_fp(fp)] = current->pid;
                        tidcursor_fp(fp) = 0;
                        rcd->active_slaves |= 1 << subctxt_fp(fp);
                        ret = 1;
                        goto done;
                }
        }

done:
        return ret;
}

static int qib_open(struct inode *in, struct file *fp)
{
        /* The real work is performed later in qib_assign_ctxt() */
        fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
        if (fp->private_data) /* no cpu affinity by default */
                ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
        return fp->private_data ? 0 : -ENOMEM;
}

/*
 * Get ctxt early, so can set affinity prior to memory allocation.
 */
static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
        int ret;
        int i_minor;
        unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;

        /* Check to be sure we haven't already initialized this file */
        if (ctxt_fp(fp)) {
                ret = -EINVAL;
                goto done;
        }

        /* for now, if major version is different, bail */
        swmajor = uinfo->spu_userversion >> 16;
        if (swmajor != QIB_USER_SWMAJOR) {
                ret = -ENODEV;
                goto done;
        }

        swminor = uinfo->spu_userversion & 0xffff;

        if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
                alg = uinfo->spu_port_alg;

        mutex_lock(&qib_mutex);

        if (qib_compatible_subctxts(swmajor, swminor) &&
            uinfo->spu_subctxt_cnt) {
                ret = find_shared_ctxt(fp, uinfo);
                if (ret) {
                        if (ret > 0)
                                ret = 0;
                        goto done_chk_sdma;
                }
        }

        i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
        if (i_minor)
                ret = find_free_ctxt(i_minor - 1, fp, uinfo);
        else
                ret = get_a_ctxt(fp, uinfo, alg);

done_chk_sdma:
        if (!ret) {
                struct qib_filedata *fd = fp->private_data;
                const struct qib_ctxtdata *rcd = fd->rcd;
                const struct qib_devdata *dd = rcd->dd;

                if (dd->flags & QIB_HAS_SEND_DMA) {
                        fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
                                                            dd->unit,
                                                            rcd->ctxt,
                                                            fd->subctxt);
                        if (!fd->pq)
                                ret = -ENOMEM;
                }

                /*
                 * If process has NOT already set its affinity, select and
                 * reserve a processor for it, as a rendezvous for all
                 * users of the driver.  If they don't actually later
                 * set affinity to this cpu, or set it to some other cpu,
                 * it just means that sooner or later we don't recommend
                 * a cpu, and let the scheduler do its best.
                 */
                if (!ret && cpus_weight(current->cpus_allowed) >=
                    qib_cpulist_count) {
                        int cpu;
                        cpu = find_first_zero_bit(qib_cpulist,
                                                  qib_cpulist_count);
                        if (cpu != qib_cpulist_count) {
                                __set_bit(cpu, qib_cpulist);
                                fd->rec_cpu_num = cpu;
                        }
                } else if (cpus_weight(current->cpus_allowed) == 1 &&
                           test_bit(first_cpu(current->cpus_allowed),
                                    qib_cpulist))
                        qib_devinfo(dd->pcidev, "%s PID %u affinity "
                                    "set to cpu %d; already allocated\n",
                                    current->comm, current->pid,
                                    first_cpu(current->cpus_allowed));
        }

        mutex_unlock(&qib_mutex);

done:
        return ret;
}

static int qib_do_user_init(struct file *fp,
                            const struct qib_user_info *uinfo)
{
        int ret;
        struct qib_ctxtdata *rcd = ctxt_fp(fp);
        struct qib_devdata *dd;
        unsigned uctxt;

        /* Subctxts don't need to initialize anything since master did it. */
        if (subctxt_fp(fp)) {
                ret = wait_event_interruptible(rcd->wait,
                        !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
                goto bail;
        }

        dd = rcd->dd;

        /* some ctxts may get extra buffers, calculate that here */
        uctxt = rcd->ctxt - dd->first_user_ctxt;
        if (uctxt < dd->ctxts_extrabuf) {
                rcd->piocnt = dd->pbufsctxt + 1;
                rcd->pio_base = rcd->piocnt * uctxt;
        } else {
                rcd->piocnt = dd->pbufsctxt;
                rcd->pio_base = rcd->piocnt * uctxt +
                        dd->ctxts_extrabuf;
        }
1600 | ||
1601 | /* | |
1602 | * All user buffers are 2KB buffers. If we ever support | |
1603 | * giving 4KB buffers to user processes, this will need some | |
1604 | * work. Can't use piobufbase directly, because it has | |
1605 | * both 2K and 4K buffer base values. So check and handle. | |
1606 | */ | |
1607 | if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) { | |
1608 | if (rcd->pio_base >= dd->piobcnt2k) { | |
1609 | qib_dev_err(dd, | |
1610 | "%u:ctxt%u: no 2KB buffers available\n", | |
1611 | dd->unit, rcd->ctxt); | |
1612 | ret = -ENOBUFS; | |
1613 | goto bail; | |
1614 | } | |
1615 | rcd->piocnt = dd->piobcnt2k - rcd->pio_base; | |
1616 | qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n", | |
1617 | rcd->ctxt, rcd->piocnt); | |
1618 | } | |
1619 | ||
1620 | rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign; | |
1621 | qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, | |
1622 | TXCHK_CHG_TYPE_USER, rcd); | |
1623 | /* | |
1624 | * try to ensure that processes start up with consistent avail update | |
1625 | * for their own range, at least. If system very quiet, it might | |
1626 | * have the in-memory copy out of date at startup for this range of | |
1627 | * buffers, when a context gets re-used. Do after the chg_pioavail | |
1628 | * and before the rest of setup, so it's "almost certain" the dma | |
1629 | * will have occurred (can't 100% guarantee, but should be many | |
1630 | * decimals of 9s, with this ordering), given how much else happens | |
1631 | * after this. | |
1632 | */ | |
1633 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | |
1634 | ||
1635 | /* | |
1636 | * Now allocate the rcvhdr Q and eager TIDs; skip the TID | |
1637 | * array for time being. If rcd->ctxt > chip-supported, | |
1638 | * we need to do extra stuff here to handle by handling overflow | |
1639 | * through ctxt 0, someday | |
1640 | */ | |
1641 | ret = qib_create_rcvhdrq(dd, rcd); | |
1642 | if (!ret) | |
1643 | ret = qib_setup_eagerbufs(rcd); | |
1644 | if (ret) | |
1645 | goto bail_pio; | |
1646 | ||
1647 | rcd->tidcursor = 0; /* start at beginning after open */ | |
1648 | ||
1649 | /* initialize poll variables... */ | |
1650 | rcd->urgent = 0; | |
1651 | rcd->urgent_poll = 0; | |
1652 | ||
1653 | /* | |
1654 | * Now enable the ctxt for receive. | |
1655 | * For chips that are set to DMA the tail register to memory | |
1656 | * when they change (and when the update bit transitions from | |
1657 | * 0 to 1. So for those chips, we turn it off and then back on. | |
1658 | * This will (very briefly) affect any other open ctxts, but the | |
1659 | * duration is very short, and therefore isn't an issue. We | |
25985edc | 1660 | * explicitly set the in-memory tail copy to 0 beforehand, so we |
f931551b RC |
1661 | * don't have to wait to be sure the DMA update has happened |
1662 | * (chip resets head/tail to 0 on transition to enable). | |
1663 | */ | |
1664 | if (rcd->rcvhdrtail_kvaddr) | |
1665 | qib_clear_rcvhdrtail(rcd); | |
1666 | ||
1667 | dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB, | |
1668 | rcd->ctxt); | |
1669 | ||
1670 | /* Notify any waiting slaves */ | |
1671 | if (rcd->subctxt_cnt) { | |
1672 | clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag); | |
1673 | wake_up(&rcd->wait); | |
1674 | } | |
1675 | return 0; | |
1676 | ||
1677 | bail_pio: | |
1678 | qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, | |
1679 | TXCHK_CHG_TYPE_KERN, rcd); | |
1680 | bail: | |
1681 | return ret; | |
1682 | } | |
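
/*
 * Aside: the piocnt/pio_base split above distributes nbufs send buffers
 * over N user contexts: every context gets nbufs / N, and the first
 * nbufs % N contexts (dd->ctxts_extrabuf) get one extra.  A hedged
 * stand-alone sketch of the same arithmetic (hypothetical names):
 *
 *	static void pio_range(unsigned nbufs, unsigned nctxts, unsigned u,
 *			      unsigned *cnt, unsigned *base)
 *	{
 *		unsigned per = nbufs / nctxts, extra = nbufs % nctxts;
 *
 *		if (u < extra) {		// first 'extra' ctxts
 *			*cnt = per + 1;
 *			*base = (per + 1) * u;
 *		} else {			// remaining ctxts
 *			*cnt = per;
 *			*base = per * u + extra;
 *		}
 *	}
 *
 * E.g. nbufs = 10, nctxts = 4 gives ranges [0,2], [3,5], [6,7], [8,9];
 * the ranges tile the buffer space with no gaps or overlap.
 */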

/**
 * unlock_expected_tids - unlock any expected TID entries context still had in use
 * @rcd: ctxt
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using f_clear_tids.
 */
static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
	int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;

	for (i = ctxt_tidbase; i < maxtid; i++) {
		struct page *p = dd->pageshadow[i];
		dma_addr_t phys;

		if (!p)
			continue;

		phys = dd->physshadow[i];
		dd->physshadow[i] = dd->tidinvalid;
		dd->pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		qib_release_user_pages(&p, 1);
		cnt++;
	}
}
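
/*
 * Aside: each expected-TID slot pairs a pinned user page
 * (dd->pageshadow[i]) with its DMA mapping (dd->physshadow[i]), and the
 * teardown above undoes the setup in reverse order:
 *
 *	setup:    pin user page -> pci_map_page() -> record in shadow
 *	teardown: clear shadow  -> pci_unmap_page() -> release page
 *
 * Clearing the shadow entry before unmapping means nothing that walks
 * the shadow arrays can find a slot whose page is about to be returned
 * to the VM.
 */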

static int qib_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct qib_filedata *fd;
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	unsigned long flags;
	unsigned ctxt;
	pid_t pid;

	mutex_lock(&qib_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	rcd = fd->rcd;
	if (!rcd) {
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	dd = rcd->dd;

	/* ensure all pio buffer writes in progress are flushed */
	qib_flush_wc();

	/* drain user sdma queue */
	if (fd->pq) {
		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
		qib_user_sdma_queue_destroy(fd->pq);
	}

	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	if (--rcd->cnt) {
		/*
		 * XXX If the master closes the context before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		rcd->active_slaves &= ~(1 << fd->subctxt);
		rcd->subpid[fd->subctxt] = 0;
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	/* early; no interrupt users after this */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	ctxt = rcd->ctxt;
	dd->rcd[ctxt] = NULL;
	pid = rcd->pid;
	rcd->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (rcd->rcvwait_to || rcd->piowait_to ||
	    rcd->rcvnowait || rcd->pionowait) {
		rcd->rcvwait_to = 0;
		rcd->piowait_to = 0;
		rcd->rcvnowait = 0;
		rcd->pionowait = 0;
	}
	if (rcd->flag)
		rcd->flag = 0;

	if (dd->kregbase) {
		/* atomically clear receive enable ctxt and intr avail. */
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);

		/* clean up the pkeys for this ctxt user */
		qib_clean_part_key(rcd, dd);
		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
		qib_chg_pioavailkernel(dd, rcd->pio_base,
				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);

		dd->f_clear_tids(dd, rcd);

		if (dd->pageshadow)
			unlock_expected_tids(rcd);
		qib_stats.sps_ctxts--;
	}

	mutex_unlock(&qib_mutex);
	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}
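
/*
 * Aside: rcd->cnt implements a plain "last closer tears down" scheme
 * for shared contexts, made safe by qib_mutex rather than by atomics.
 * A hedged sketch of the shape (hypothetical names):
 *
 *	mutex_lock(&lock);
 *	if (--obj->users) {		// a slave: detach and return
 *		detach_self(obj);
 *		mutex_unlock(&lock);
 *		return 0;
 *	}
 *	unpublish(obj);			// no new users can find it
 *	mutex_unlock(&lock);
 *	free_object(obj);		// heavy work outside the mutex
 *
 * Note how qib_free_ctxtdata() above likewise runs only after qib_mutex
 * is dropped, keeping potentially slow freeing out of the lock.
 */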

static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
{
	struct qib_ctxt_info info;
	int ret;
	size_t sz;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_filedata *fd;

	fd = fp->private_data;

	/* clear any padding bytes so we don't leak kernel stack to user */
	memset(&info, 0, sizeof(info));
	info.num_active = qib_count_active_units();
	info.unit = rcd->dd->unit;
	info.port = rcd->ppd->port;
	info.ctxt = rcd->ctxt;
	info.subctxt = subctxt_fp(fp);
	/* Number of user ctxts available for this device. */
	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
	info.num_subctxts = rcd->subctxt_cnt;
	info.rec_cpu = fd->rec_cpu_num;
	sz = sizeof(info);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}
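
/*
 * Aside: userspace reaches qib_ctxt_info() through the write()-based
 * command channel handled by qib_write() below: it passes the *address*
 * of a result buffer in cmd.cmd.ctxt_info and the driver fills it in
 * via copy_to_user().  A hedged userspace sketch (assumes the uapi
 * structs from qib_common.h; error handling abbreviated):
 *
 *	struct qib_ctxt_info info;
 *	struct qib_cmd cmd;
 *
 *	cmd.type = QIB_CMD_CTXT_INFO;
 *	cmd.cmd.ctxt_info = (__u64)(unsigned long)&info;
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		err(1, "QIB_CMD_CTXT_INFO");
 *	printf("unit %u ctxt %u of %u\n",
 *	       info.unit, info.ctxt, info.num_ctxts);
 */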

static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
				 u32 __user *inflightp)
{
	const u32 val = qib_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

static int qib_sdma_get_complete(struct qib_pportdata *ppd,
				 struct qib_user_sdma_queue *pq,
				 u32 __user *completep)
{
	u32 val;
	int err;

	if (!pq)
		return -EINVAL;

	err = qib_user_sdma_make_progress(ppd, pq);
	if (err < 0)
		return err;

	val = qib_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}
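
/*
 * Aside: the inflight/complete counters form a simple progress
 * protocol: "inflight" tracks descriptors userspace has queued via
 * qib_aio_write(), "complete" tracks those the hardware has finished.
 * A hedged userspace polling sketch (hypothetical helper; assumes the
 * uapi structs from qib_common.h, and assumes both counters are
 * monotonic totals so equality means the queue is drained):
 *
 *	static void wait_sdma_idle(int fd)
 *	{
 *		__u32 inflight = 0, complete = 0;
 *		struct qib_cmd cmd;
 *
 *		do {
 *			cmd.type = QIB_CMD_SDMA_INFLIGHT;
 *			cmd.cmd.sdma_inflight =
 *				(__u64)(unsigned long)&inflight;
 *			write(fd, &cmd, sizeof(cmd));
 *			cmd.type = QIB_CMD_SDMA_COMPLETE;
 *			cmd.cmd.sdma_complete =
 *				(__u64)(unsigned long)&complete;
 *			write(fd, &cmd, sizeof(cmd));
 *		} while (complete != inflight);
 *	}
 */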

static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
	int ret = 0;

	if (!usable(rcd->ppd)) {
		int i;
		/*
		 * If the link is down, or otherwise not usable, delay
		 * the caller up to 30 seconds, so we don't thrash
		 * in trying to get the chip back to ACTIVE, and
		 * set a flag so they make the call again.
		 */
		if (rcd->user_event_mask) {
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
			msleep(100);
		ret = -ENETDOWN;
	}
	return ret;
}

/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use that is specific to send buffers.
 */
int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&ppd->dd->uctxt_lock);
	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
	     ctxt++) {
		rcd = ppd->dd->rcd[ctxt];
		if (!rcd)
			continue;
		if (rcd->user_event_mask) {
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, &rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(evtbit, &rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&ppd->dd->uctxt_lock);

	return ret;
}
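
/*
 * Aside: user_event_mask is a one-way doorbell from driver to process:
 * the driver set_bit()s an event here, the process notices the bit and
 * acknowledges it with QIB_CMD_ACK_EVENT, which lands in
 * qib_user_event_ack() below.  A hedged userspace sketch of the ack
 * side (assumes the uapi structs from qib_common.h):
 *
 *	struct qib_cmd cmd;
 *
 *	cmd.type = QIB_CMD_ACK_EVENT;
 *	cmd.cmd.event_mask = 1UL << _QIB_EVENT_DISARM_BUFS_BIT;
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		err(1, "QIB_CMD_ACK_EVENT");
 */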

/*
 * Clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions, just atomically clear.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
			      unsigned long events)
{
	int ret = 0, i;

	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
			(void)qib_disarm_piobufs_ifneeded(rcd);
			ret = disarm_req_delay(rcd);
		} else
			clear_bit(i, &rcd->user_event_mask[subctxt]);
	}
	return ret;
}

static ssize_t qib_write(struct file *fp, const char __user *data,
			 size_t count, loff_t *off)
{
	const struct qib_cmd __user *ucmd;
	struct qib_ctxtdata *rcd;
	const void __user *src;
	size_t consumed, copy = 0;
	struct qib_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct qib_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
	case QIB_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;

	case QIB_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;

	case QIB_CMD_CTXT_INFO:
		copy = sizeof(cmd.cmd.ctxt_info);
		dest = &cmd.cmd.ctxt_info;
		src = &ucmd->cmd.ctxt_info;
		break;

	case QIB_CMD_TID_UPDATE:
	case QIB_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;

	case QIB_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;

	case QIB_CMD_DISARM_BUFS:
	case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;

	case QIB_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;

	case QIB_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;

	case QIB_CMD_ACK_EVENT:
		copy = sizeof(cmd.cmd.event_mask);
		dest = &cmd.cmd.event_mask;
		src = &ucmd->cmd.event_mask;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}
		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	rcd = ctxt_fp(fp);
	if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;

	case QIB_CMD_USER_INIT:
		ret = qib_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = qib_get_base_info(fp, (void __user *) (unsigned long)
					cmd.cmd.user_info.spu_base_info,
					cmd.cmd.user_info.spu_base_info_size);
		break;

	case QIB_CMD_RECV_CTRL:
		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
		break;

	case QIB_CMD_CTXT_INFO:
		ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
				    (unsigned long) cmd.cmd.ctxt_info);
		break;

	case QIB_CMD_TID_UPDATE:
		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
		break;

	case QIB_CMD_TID_FREE:
		ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
		break;

	case QIB_CMD_SET_PART_KEY:
		ret = qib_set_part_key(rcd, cmd.cmd.part_key);
		break;

	case QIB_CMD_DISARM_BUFS:
		(void)qib_disarm_piobufs_ifneeded(rcd);
		ret = disarm_req_delay(rcd);
		break;

	case QIB_CMD_PIOAVAILUPD:
		qib_force_pio_avail_update(rcd->dd);
		break;

	case QIB_CMD_POLL_TYPE:
		rcd->poll_type = cmd.cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_inflight);
		break;

	case QIB_CMD_SDMA_COMPLETE:
		ret = qib_sdma_get_complete(rcd->ppd,
					    user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_complete);
		break;

	case QIB_CMD_ACK_EVENT:
		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
					 cmd.cmd.event_mask);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}
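
/*
 * Aside: qib_write() is the control channel: every command is a
 * struct qib_cmd written to the device fd, and a successful write()
 * returns the number of bytes consumed.  The usual open sequence a
 * process follows is ASSIGN_CTXT then USER_INIT.  A hedged userspace
 * sketch (assumes the uapi structs and QIB_USER_SWMINOR from
 * qib_common.h; error handling abbreviated):
 *
 *	int fd = open("/dev/ipath", O_RDWR);	// wildcard: any unit
 *	struct qib_base_info base;
 *	struct qib_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.type = QIB_CMD_ASSIGN_CTXT;
 *	cmd.cmd.user_info.spu_userversion =
 *		(QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR;
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		err(1, "ASSIGN_CTXT");
 *
 *	cmd.type = QIB_CMD_USER_INIT;
 *	cmd.cmd.user_info.spu_base_info = (__u64)(unsigned long)&base;
 *	cmd.cmd.user_info.spu_base_info_size = sizeof(base);
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		err(1, "USER_INIT");
 */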

static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long dim, loff_t off)
{
	struct qib_filedata *fp = iocb->ki_filp->private_data;
	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
	struct qib_user_sdma_queue *pq = fp->pq;

	if (!dim || !pq)
		return -EINVAL;

	return qib_user_sdma_writev(rcd, pq, iov, dim);
}
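
/*
 * Aside: the SDMA data path is writev(2) on the same fd; the iovec
 * array describes a packet submission and is routed here via the
 * .aio_write hook in qib_file_ops.  A hedged userspace sketch (the
 * packet layout is driver-specific and elided):
 *
 *	struct iovec iov = {
 *		.iov_base = pkt,	// caller-built sdma header+payload
 *		.iov_len  = pktlen,
 *	};
 *
 *	if (writev(fd, &iov, 1) < 0)
 *		err(1, "sdma writev");
 */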

static struct class *qib_class;
static dev_t qib_dev;

int qib_cdev_init(int minor, const char *name,
		  const struct file_operations *fops,
		  struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
	struct cdev *cdev;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR QIB_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR QIB_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(qib_class, NULL, dev, NULL, name);
	if (!IS_ERR(device))
		goto done;
	ret = PTR_ERR(device);
	device = NULL;
	printk(KERN_ERR QIB_DRV_NAME ": Could not create "
	       "device for minor %d, %s (err %d)\n",
	       minor, name, -ret);
err_cdev:
	cdev_del(cdev);
	cdev = NULL;
done:
	*cdevp = cdev;
	*devp = device;
	return ret;
}
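
/*
 * Aside: this is the classic manual chardev idiom: cdev_alloc() plus
 * cdev_add() makes the (major, minor) node reachable, and
 * device_create() then has udev create the /dev entry under the class.
 * The unwind path mirrors it: a device_create() failure only needs
 * cdev_del(), because nothing later depends on the cdev.  Note that
 * the fops become live the moment cdev_add() returns, so everything
 * they rely on must be initialized before calling this.
 */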

void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
{
	struct device *device = *devp;

	if (device) {
		device_unregister(device);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

static struct cdev *wildcard_cdev;
static struct device *wildcard_device;

int __init qib_dev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	qib_class = class_create(THIS_MODULE, "ipath");
	if (IS_ERR(qib_class)) {
		ret = PTR_ERR(qib_class);
		printk(KERN_ERR QIB_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		/* don't leave an ERR_PTR for qib_dev_cleanup() to destroy */
		qib_class = NULL;
		unregister_chrdev_region(qib_dev, QIB_NMINORS);
	}

done:
	return ret;
}

void qib_dev_cleanup(void)
{
	if (qib_class) {
		class_destroy(qib_class);
		qib_class = NULL;
	}

	unregister_chrdev_region(qib_dev, QIB_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);

static void qib_user_remove(struct qib_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int qib_user_add(struct qib_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = qib_cdev_init(0, "ipath", &qib_file_ops,
				    &wildcard_cdev, &wildcard_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath%d", dd->unit);
	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
			    &dd->user_cdev, &dd->user_device);
	if (ret)
		qib_user_remove(dd);
done:
	return ret;
}
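
/*
 * Aside: the minor-number scheme ties this back to qib_assign_ctxt():
 * minor 0 (QIB_USER_MINOR_BASE) is the wildcard /dev/ipath node, and
 * minor unit+1 is the per-unit /dev/ipathN node.  qib_assign_ctxt()
 * recovers the caller's choice with
 *
 *	i_minor = iminor(inode) - QIB_USER_MINOR_BASE;
 *
 * so i_minor == 0 means "any unit" (get_a_ctxt()) and i_minor - 1 is
 * the specific unit requested (find_free_ctxt()).
 */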

/*
 * Create per-unit files in /dev
 */
int qib_device_create(struct qib_devdata *dd)
{
	int r, ret;

	r = qib_user_add(dd);
	ret = qib_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}

/*
 * Remove per-unit files in /dev
 * Returns void; the core kernel APIs used here report no errors.
 */
void qib_device_remove(struct qib_devdata *dd)
{
	qib_user_remove(dd);
	qib_diag_remove(dd);
}