Commit | Line | Data |
---|---|---|
7f510b46 | 1 | /* |
e7eacd36 | 2 | * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. |
7f510b46 BS |
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
34 | #include <linux/pci.h> | |
35 | #include <linux/poll.h> | |
36 | #include <linux/cdev.h> | |
37 | #include <linux/swap.h> | |
38 | #include <linux/vmalloc.h> | |
124b4dcb DO |
39 | #include <linux/highmem.h> |
40 | #include <linux/io.h> | |
41 | #include <linux/jiffies.h> | |
f2b9857e | 42 | #include <linux/smp_lock.h> |
7f510b46 BS |
43 | #include <asm/pgtable.h> |
44 | ||
45 | #include "ipath_kernel.h" | |
27b678dd | 46 | #include "ipath_common.h" |
124b4dcb | 47 | #include "ipath_user_sdma.h" |
7f510b46 BS |
48 | |
49 | static int ipath_open(struct inode *, struct file *); | |
50 | static int ipath_close(struct inode *, struct file *); | |
51 | static ssize_t ipath_write(struct file *, const char __user *, size_t, | |
52 | loff_t *); | |
124b4dcb DO |
53 | static ssize_t ipath_writev(struct kiocb *, const struct iovec *, |
54 | unsigned long , loff_t); | |
7f510b46 BS |
55 | static unsigned int ipath_poll(struct file *, struct poll_table_struct *); |
56 | static int ipath_mmap(struct file *, struct vm_area_struct *); | |
57 | ||
2b8693c0 | 58 | static const struct file_operations ipath_file_ops = { |
7f510b46 BS |
59 | .owner = THIS_MODULE, |
60 | .write = ipath_write, | |
124b4dcb | 61 | .aio_write = ipath_writev, |
7f510b46 BS |
62 | .open = ipath_open, |
63 | .release = ipath_close, | |
64 | .poll = ipath_poll, | |
65 | .mmap = ipath_mmap | |
66 | }; | |
67 | ||
0a5a83cf RC |
68 | /* |
69 | * Convert kernel virtual addresses to physical addresses so they don't | |
70 | * potentially conflict with the chip addresses used as mmap offsets. | |
71 | * It doesn't really matter what mmap offset we use as long as we can | |
72 | * interpret it correctly. | |
73 | */ | |
74 | static u64 cvt_kvaddr(void *p) | |
75 | { | |
76 | struct page *page; | |
77 | u64 paddr = 0; | |
78 | ||
79 | page = vmalloc_to_page(p); | |
80 | if (page) | |
81 | paddr = page_to_pfn(page) << PAGE_SHIFT; | |
82 | ||
83 | return paddr; | |
84 | } | |
85 | ||
9929b0fb | 86 | static int ipath_get_base_info(struct file *fp, |
7f510b46 BS |
87 | void __user *ubase, size_t ubase_size) |
88 | { | |
9929b0fb | 89 | struct ipath_portdata *pd = port_fp(fp); |
7f510b46 BS |
90 | int ret = 0; |
91 | struct ipath_base_info *kinfo = NULL; | |
92 | struct ipath_devdata *dd = pd->port_dd; | |
9929b0fb BS |
93 | unsigned subport_cnt; |
94 | int shared, master; | |
95 | size_t sz; | |
96 | ||
97 | subport_cnt = pd->port_subport_cnt; | |
98 | if (!subport_cnt) { | |
99 | shared = 0; | |
100 | master = 0; | |
101 | subport_cnt = 1; | |
102 | } else { | |
103 | shared = 1; | |
104 | master = !subport_fp(fp); | |
105 | } | |
7f510b46 | 106 | |
9929b0fb BS |
107 | sz = sizeof(*kinfo); |
108 | /* If port sharing is not requested, allow the old size structure */ | |
109 | if (!shared) | |
c7e29ff1 | 110 | sz -= 7 * sizeof(u64); |
9929b0fb | 111 | if (ubase_size < sz) { |
7f510b46 | 112 | ipath_cdbg(PROC, |
9929b0fb BS |
113 | "Base size %zu, need %zu (version mismatch?)\n", |
114 | ubase_size, sz); | |
7f510b46 BS |
115 | ret = -EINVAL; |
116 | goto bail; | |
117 | } | |
118 | ||
119 | kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL); | |
120 | if (kinfo == NULL) { | |
121 | ret = -ENOMEM; | |
122 | goto bail; | |
123 | } | |
124 | ||
125 | ret = dd->ipath_f_get_base_info(pd, kinfo); | |
126 | if (ret < 0) | |
127 | goto bail; | |
128 | ||
129 | kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt; | |
130 | kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize; | |
131 | kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt; | |
132 | kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize; | |
133 | /* | |
134 | * have to mmap whole thing | |
135 | */ | |
136 | kinfo->spi_rcv_egrbuftotlen = | |
137 | pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; | |
138 | kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk; | |
139 | kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen / | |
140 | pd->port_rcvegrbuf_chunks; | |
9929b0fb BS |
141 | kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt; |
142 | if (master) | |
143 | kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt; | |
7f510b46 BS |
144 | /* |
145 | * for this use, may be ipath_cfgports summed over all chips that | |
146 | * are configured and present | |
147 | */ | |
148 | kinfo->spi_nports = dd->ipath_cfgports; | |
149 | /* unit (chip/board) our port is on */ | |
150 | kinfo->spi_unit = dd->ipath_unit; | |
151 | /* for now, only a single page */ | |
152 | kinfo->spi_tid_maxsize = PAGE_SIZE; | |
153 | ||
154 | /* | |
155 | * Doing this per port, and based on the skip value, etc. This has | |
156 | * to be the actual buffer size, since the protocol code treats it | |
157 | * as an array. | |
158 | * | |
159 | * These have to be set to user addresses in the user code via mmap. | |
160 | * These values are used on return to user code for the mmap target | |
161 | * addresses only. For 32 bit, same 44 bit address problem, so use | |
162 | * the physical address, not virtual. Before 2.6.11, using the | |
163 | * page_address() macro worked, but in 2.6.11, even that returns the | |
164 | * full 64 bit address (upper bits all 1's). So far, using the | |
165 | * physical addresses (or chip offsets, for chip mapping) works, but | |
9929b0fb BS |
166 | * no doubt some future kernel release will change that, and we'll be |
167 | * on to yet another method of dealing with this. | |
7f510b46 BS |
168 | */ |
169 | kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; | |
9929b0fb | 170 | kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys; |
7f510b46 BS |
171 | kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; |
172 | kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys; | |
173 | kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + | |
174 | (void *) dd->ipath_statusp - | |
175 | (void *) dd->ipath_pioavailregs_dma; | |
9929b0fb | 176 | if (!shared) { |
e2ab41ca | 177 | kinfo->spi_piocnt = pd->port_piocnt; |
9929b0fb BS |
178 | kinfo->spi_piobufbase = (u64) pd->port_piobufs; |
179 | kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + | |
a18e26ae | 180 | dd->ipath_ureg_align * pd->port_port; |
9929b0fb | 181 | } else if (master) { |
e2ab41ca DO |
182 | kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) + |
183 | (pd->port_piocnt % subport_cnt); | |
9929b0fb BS |
184 | /* Master's PIO buffers are after all the slave's */ |
185 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + | |
186 | dd->ipath_palign * | |
e2ab41ca | 187 | (pd->port_piocnt - kinfo->spi_piocnt); |
9929b0fb BS |
188 | } else { |
189 | unsigned slave = subport_fp(fp) - 1; | |
7f510b46 | 190 | |
e2ab41ca | 191 | kinfo->spi_piocnt = pd->port_piocnt / subport_cnt; |
9929b0fb BS |
192 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + |
193 | dd->ipath_palign * kinfo->spi_piocnt * slave; | |
c7e29ff1 | 194 | } |
1d7c2e52 | 195 | |
c7e29ff1 MD |
196 | if (shared) { |
197 | kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + | |
a18e26ae | 198 | dd->ipath_ureg_align * pd->port_port; |
c7e29ff1 MD |
199 | kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs; |
200 | kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base; | |
201 | kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr; | |
202 | ||
0a5a83cf | 203 | kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase + |
c7e29ff1 | 204 | PAGE_SIZE * subport_fp(fp)); |
9929b0fb | 205 | |
0a5a83cf | 206 | kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base + |
c7e29ff1 | 207 | pd->port_rcvhdrq_size * subport_fp(fp)); |
947d7617 | 208 | kinfo->spi_rcvhdr_tailaddr = 0; |
0a5a83cf | 209 | kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf + |
c7e29ff1 MD |
210 | pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * |
211 | subport_fp(fp)); | |
212 | ||
213 | kinfo->spi_subport_uregbase = | |
214 | cvt_kvaddr(pd->subport_uregbase); | |
215 | kinfo->spi_subport_rcvegrbuf = | |
216 | cvt_kvaddr(pd->subport_rcvegrbuf); | |
217 | kinfo->spi_subport_rcvhdr_base = | |
218 | cvt_kvaddr(pd->subport_rcvhdr_base); | |
219 | ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n", | |
220 | kinfo->spi_port, kinfo->spi_runtime_flags, | |
221 | (unsigned long long) kinfo->spi_subport_uregbase, | |
222 | (unsigned long long) kinfo->spi_subport_rcvegrbuf, | |
223 | (unsigned long long) kinfo->spi_subport_rcvhdr_base); | |
9929b0fb BS |
224 | } |
225 | ||
1bf7724e DO |
226 | /* |
227 | * All user buffers are 2KB buffers. If we ever support | |
228 | * giving 4KB buffers to user processes, this will need some | |
229 | * work. | |
230 | */ | |
231 | kinfo->spi_pioindex = (kinfo->spi_piobufbase - | |
232 | (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign; | |
7f510b46 BS |
233 | kinfo->spi_pioalign = dd->ipath_palign; |
234 | ||
235 | kinfo->spi_qpair = IPATH_KD_QP; | |
826d8010 DO |
236 | /* |
237 | * user mode PIO buffers are always 2KB, even when 4KB can | |
238 | * be received, and sent via the kernel; this is ibmaxlen | |
239 | * for 2K MTU. | |
240 | */ | |
241 | kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32); | |
7f510b46 BS |
242 | kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ |
243 | kinfo->spi_port = pd->port_port; | |
9929b0fb | 244 | kinfo->spi_subport = subport_fp(fp); |
eaf6733b | 245 | kinfo->spi_sw_version = IPATH_KERN_SWVERSION; |
7f510b46 BS |
246 | kinfo->spi_hw_version = dd->ipath_revision; |
247 | ||
9929b0fb BS |
248 | if (master) { |
249 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; | |
9929b0fb BS |
250 | } |
251 | ||
c7e29ff1 MD |
252 | sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo); |
253 | if (copy_to_user(ubase, kinfo, sz)) | |
7f510b46 BS |
254 | ret = -EFAULT; |
255 | ||
256 | bail: | |
257 | kfree(kinfo); | |
258 | return ret; | |
259 | } | |
260 | ||
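/*
 * Illustrative sketch (not called by the driver) of the PIO-buffer
 * carve-up that ipath_get_base_info() reports above: each slave
 * subport gets an equal share of the port's PIO buffers, and the
 * master gets that share plus any remainder, placed after all of the
 * slaves' buffers.  Parameter names mirror the port/device fields
 * used above; the helper itself is only an example.
 */
static u64 example_subport_piobufbase(u64 port_piobufs, unsigned palign,
				      unsigned port_piocnt,
				      unsigned subport_cnt, unsigned subport)
{
	unsigned share = port_piocnt / subport_cnt;

	if (!subport)		/* master: after all of the slaves */
		return port_piobufs +
			(u64) palign * share * (subport_cnt - 1);
	/* slave N owns the Nth slice of 'share' buffers */
	return port_piobufs + (u64) palign * share * (subport - 1);
}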
261 | /** | |
262 | * ipath_tid_update - update a port TID | |
263 | * @pd: the port | |
9929b0fb | 264 | * @fp: the ipath device file |
7f510b46 BS |
265 | * @ti: the TID information |
266 | * | |
267 | * The new implementation as of Oct 2004 is that the driver assigns | |
268 | * the tid and returns it to the caller. To make it easier to | |
269 | * catch bugs, and to reduce search time, we keep a cursor for | |
270 | * each port, walking the shadow tid array to find one that's not | |
271 | * in use. | |
272 | * | |
273 | * For now, if we can't allocate the full list, we fail, although | |
274 | * in the long run, we'll allocate as many as we can, and the | |
275 | * caller will deal with that by trying the remaining pages later. | |
276 | * That means that when we fail, we have to mark the tids as not in | |
277 | * use again, in our shadow copy. | |
278 | * | |
279 | * It's up to the caller to free the tids when they are done. | |
280 | * We'll unlock the pages as they free them. | |
281 | * | |
282 | * Also, right now we are locking one page at a time, but since | |
283 | * the intended use of this routine is for a single group of | |
284 | * virtually contiguous pages, that should change to improve | |
285 | * performance. | |
286 | */ | |
9929b0fb | 287 | static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp, |
7f510b46 BS |
288 | const struct ipath_tid_info *ti) |
289 | { | |
290 | int ret = 0, ntids; | |
9929b0fb | 291 | u32 tid, porttid, cnt, i, tidcnt, tidoff; |
7f510b46 BS |
292 | u16 *tidlist; |
293 | struct ipath_devdata *dd = pd->port_dd; | |
294 | u64 physaddr; | |
295 | unsigned long vaddr; | |
296 | u64 __iomem *tidbase; | |
297 | unsigned long tidmap[8]; | |
298 | struct page **pagep = NULL; | |
9929b0fb | 299 | unsigned subport = subport_fp(fp); |
7f510b46 BS |
300 | |
301 | if (!dd->ipath_pageshadow) { | |
302 | ret = -ENOMEM; | |
303 | goto done; | |
304 | } | |
305 | ||
306 | cnt = ti->tidcnt; | |
307 | if (!cnt) { | |
308 | ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n", | |
309 | (unsigned long long) ti->tidlist); | |
310 | /* | |
311 | * Should we treat this as success?  Likely a bug. | |
312 | */ | |
313 | ret = -EFAULT; | |
314 | goto done; | |
315 | } | |
9929b0fb BS |
316 | porttid = pd->port_port * dd->ipath_rcvtidcnt; |
317 | if (!pd->port_subport_cnt) { | |
318 | tidcnt = dd->ipath_rcvtidcnt; | |
319 | tid = pd->port_tidcursor; | |
320 | tidoff = 0; | |
321 | } else if (!subport) { | |
322 | tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + | |
323 | (dd->ipath_rcvtidcnt % pd->port_subport_cnt); | |
324 | tidoff = dd->ipath_rcvtidcnt - tidcnt; | |
325 | porttid += tidoff; | |
326 | tid = tidcursor_fp(fp); | |
327 | } else { | |
328 | tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; | |
329 | tidoff = tidcnt * (subport - 1); | |
330 | porttid += tidoff; | |
331 | tid = tidcursor_fp(fp); | |
332 | } | |
333 | if (cnt > tidcnt) { | |
7f510b46 BS |
334 | /* make sure it all fits in port_tid_pg_list */ |
335 | dev_info(&dd->pcidev->dev, "Process tried to allocate %u " | |
336 | "TIDs, only trying max (%u)\n", cnt, tidcnt); | |
337 | cnt = tidcnt; | |
338 | } | |
9929b0fb BS |
339 | pagep = &((struct page **) pd->port_tid_pg_list)[tidoff]; |
340 | tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff]; | |
7f510b46 BS |
341 | |
342 | memset(tidmap, 0, sizeof(tidmap)); | |
7f510b46 | 343 | /* before decrement; chip actual # */ |
7f510b46 BS |
344 | ntids = tidcnt; |
345 | tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) + | |
346 | dd->ipath_rcvtidbase + | |
347 | porttid * sizeof(*tidbase)); | |
348 | ||
349 | ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n", | |
350 | pd->port_port, cnt, tid, tidbase); | |
351 | ||
352 | /* virtual address of first page in transfer */ | |
353 | vaddr = ti->tidvaddr; | |
354 | if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, | |
355 | cnt * PAGE_SIZE)) { | |
356 | ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n", | |
357 | (void *)vaddr, cnt); | |
358 | ret = -EFAULT; | |
359 | goto done; | |
360 | } | |
361 | ret = ipath_get_user_pages(vaddr, cnt, pagep); | |
362 | if (ret) { | |
363 | if (ret == -EBUSY) { | |
364 | ipath_dbg("Failed to lock addr %p, %u pages " | |
365 | "(already locked)\n", | |
366 | (void *) vaddr, cnt); | |
367 | /* | |
368 | * for now, continue, and see what happens but with | |
369 | * the new implementation, this should never happen, | |
370 | * unless perhaps the user has mpin'ed the pages | |
371 | * themselves (something we need to test) | |
372 | */ | |
373 | ret = 0; | |
374 | } else { | |
375 | dev_info(&dd->pcidev->dev, | |
376 | "Failed to lock addr %p, %u pages: " | |
377 | "errno %d\n", (void *) vaddr, cnt, -ret); | |
378 | goto done; | |
379 | } | |
380 | } | |
381 | for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { | |
382 | for (; ntids--; tid++) { | |
383 | if (tid == tidcnt) | |
384 | tid = 0; | |
385 | if (!dd->ipath_pageshadow[porttid + tid]) | |
386 | break; | |
387 | } | |
388 | if (ntids < 0) { | |
389 | /* | |
390 | * oops, wrapped all the way through their TIDs, | |
391 | * and didn't have enough free; see comments at | |
392 | * start of routine | |
393 | */ | |
394 | ipath_dbg("Not enough free TIDs for %u pages " | |
395 | "(index %d), failing\n", cnt, i); | |
396 | i--; /* last tidlist[i] not filled in */ | |
397 | ret = -ENOMEM; | |
398 | break; | |
399 | } | |
9929b0fb | 400 | tidlist[i] = tid + tidoff; |
7f510b46 | 401 | ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, " |
9929b0fb | 402 | "vaddr %lx\n", i, tid + tidoff, vaddr); |
7f510b46 BS |
403 | /* we "know" system pages and TID pages are same size */ |
404 | dd->ipath_pageshadow[porttid + tid] = pagep[i]; | |
1fd3b40f BS |
405 | dd->ipath_physshadow[porttid + tid] = ipath_map_page( |
406 | dd->pcidev, pagep[i], 0, PAGE_SIZE, | |
407 | PCI_DMA_FROMDEVICE); | |
7f510b46 BS |
408 | /* |
409 | * don't need an atomic op here, nor its overhead | |
410 | */ | |
411 | __set_bit(tid, tidmap); | |
1fd3b40f | 412 | physaddr = dd->ipath_physshadow[porttid + tid]; |
7f510b46 BS |
413 | ipath_stats.sps_pagelocks++; |
414 | ipath_cdbg(VERBOSE, | |
415 | "TID %u, vaddr %lx, physaddr %llx pgp %p\n", | |
416 | tid, vaddr, (unsigned long long) physaddr, | |
417 | pagep[i]); | |
f716cdfe JE |
418 | dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED, |
419 | physaddr); | |
7f510b46 BS |
420 | /* |
421 | * don't check this tid in ipath_portshadow, since we | |
422 | * just filled it in; start with the next one. | |
423 | */ | |
424 | tid++; | |
425 | } | |
426 | ||
427 | if (ret) { | |
428 | u32 limit; | |
429 | cleanup: | |
430 | /* jump here if copy out of updated info failed... */ | |
431 | ipath_dbg("After failure (ret=%d), undo %d of %d entries\n", | |
432 | -ret, i, cnt); | |
433 | /* same code that's in ipath_free_tid() */ | |
434 | limit = sizeof(tidmap) * BITS_PER_BYTE; | |
435 | if (limit > tidcnt) | |
436 | /* just in case size changes in future */ | |
437 | limit = tidcnt; | |
438 | tid = find_first_bit((const unsigned long *)tidmap, limit); | |
439 | for (; tid < limit; tid++) { | |
440 | if (!test_bit(tid, tidmap)) | |
441 | continue; | |
442 | if (dd->ipath_pageshadow[porttid + tid]) { | |
443 | ipath_cdbg(VERBOSE, "Freeing TID %u\n", | |
444 | tid); | |
f716cdfe JE |
445 | dd->ipath_f_put_tid(dd, &tidbase[tid], |
446 | RCVHQ_RCV_TYPE_EXPECTED, | |
7f510b46 | 447 | dd->ipath_tidinvalid); |
1fd3b40f BS |
448 | pci_unmap_page(dd->pcidev, |
449 | dd->ipath_physshadow[porttid + tid], | |
450 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | |
7f510b46 BS |
451 | dd->ipath_pageshadow[porttid + tid] = NULL; |
452 | ipath_stats.sps_pageunlocks++; | |
453 | } | |
454 | } | |
455 | ipath_release_user_pages(pagep, cnt); | |
456 | } else { | |
457 | /* | |
458 | * Copy the updated array, with ipath_tid's filled in, back | |
459 | * to user. Since we did the copy in already, this "should | |
460 | * never fail" If it does, we have to clean up... | |
461 | */ | |
462 | if (copy_to_user((void __user *) | |
463 | (unsigned long) ti->tidlist, | |
464 | tidlist, cnt * sizeof(*tidlist))) { | |
465 | ret = -EFAULT; | |
466 | goto cleanup; | |
467 | } | |
468 | if (copy_to_user((void __user *) (unsigned long) ti->tidmap, | |
469 | tidmap, sizeof tidmap)) { | |
470 | ret = -EFAULT; | |
471 | goto cleanup; | |
472 | } | |
473 | if (tid == tidcnt) | |
474 | tid = 0; | |
9929b0fb BS |
475 | if (!pd->port_subport_cnt) |
476 | pd->port_tidcursor = tid; | |
477 | else | |
478 | tidcursor_fp(fp) = tid; | |
7f510b46 BS |
479 | } |
480 | ||
481 | done: | |
482 | if (ret) | |
483 | ipath_dbg("Failed to map %u TID pages, failing with %d\n", | |
484 | ti->tidcnt, -ret); | |
485 | return ret; | |
486 | } | |
487 | ||
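/*
 * Illustrative sketch of the expected-TID split done at the top of
 * ipath_tid_update() and ipath_tid_free(): each slave subport gets an
 * equal share of the port's TIDs, the master gets its share plus any
 * remainder, and the master's range sits after the slaves'.  Not
 * called by the driver; parameter names mirror the fields used above.
 */
static void example_subport_tid_range(unsigned rcvtidcnt,
				      unsigned subport_cnt, unsigned subport,
				      unsigned *tidcnt, unsigned *tidoff)
{
	if (!subport_cnt) {		/* port is not shared */
		*tidcnt = rcvtidcnt;
		*tidoff = 0;
	} else if (!subport) {		/* master */
		*tidcnt = rcvtidcnt / subport_cnt +
			rcvtidcnt % subport_cnt;
		*tidoff = rcvtidcnt - *tidcnt;
	} else {			/* slave */
		*tidcnt = rcvtidcnt / subport_cnt;
		*tidoff = *tidcnt * (subport - 1);
	}
}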
488 | /** | |
489 | * ipath_tid_free - free a port TID | |
490 | * @pd: the port | |
9929b0fb | 491 | * @subport: the subport |
7f510b46 BS |
492 | * @ti: the TID info |
493 | * | |
494 | * right now we are unlocking one page at a time, but since | |
495 | * the intended use of this routine is for a single group of | |
496 | * virtually contiguous pages, that should change to improve | |
497 | * performance. We check that the TID is in range for this port | |
498 | * but otherwise don't check validity; if user has an error and | |
499 | * frees the wrong tid, it's only their own data that can thereby | |
500 | * be corrupted. We do check that the TID was in use, for sanity. | |
501 | * We always use our idea of the saved address, not the address that | |
502 | * they pass in to us. | |
503 | */ | |
504 | ||
9929b0fb | 505 | static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport, |
7f510b46 BS |
506 | const struct ipath_tid_info *ti) |
507 | { | |
508 | int ret = 0; | |
509 | u32 tid, porttid, cnt, limit, tidcnt; | |
510 | struct ipath_devdata *dd = pd->port_dd; | |
511 | u64 __iomem *tidbase; | |
512 | unsigned long tidmap[8]; | |
513 | ||
514 | if (!dd->ipath_pageshadow) { | |
515 | ret = -ENOMEM; | |
516 | goto done; | |
517 | } | |
518 | ||
519 | if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, | |
520 | sizeof tidmap)) { | |
521 | ret = -EFAULT; | |
522 | goto done; | |
523 | } | |
524 | ||
525 | porttid = pd->port_port * dd->ipath_rcvtidcnt; | |
9929b0fb BS |
526 | if (!pd->port_subport_cnt) |
527 | tidcnt = dd->ipath_rcvtidcnt; | |
528 | else if (!subport) { | |
529 | tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + | |
530 | (dd->ipath_rcvtidcnt % pd->port_subport_cnt); | |
531 | porttid += dd->ipath_rcvtidcnt - tidcnt; | |
532 | } else { | |
533 | tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; | |
534 | porttid += tidcnt * (subport - 1); | |
535 | } | |
7f510b46 BS |
536 | tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + |
537 | dd->ipath_rcvtidbase + | |
538 | porttid * sizeof(*tidbase)); | |
539 | ||
7f510b46 BS |
540 | limit = sizeof(tidmap) * BITS_PER_BYTE; |
541 | if (limit > tidcnt) | |
542 | /* just in case size changes in future */ | |
543 | limit = tidcnt; | |
544 | tid = find_first_bit(tidmap, limit); | |
545 | ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) " | |
546 | "set is %d, porttid %u\n", pd->port_port, ti->tidcnt, | |
547 | limit, tid, porttid); | |
548 | for (cnt = 0; tid < limit; tid++) { | |
549 | /* | |
550 | * small optimization; if we detect a run of 3 or so without | |
551 | * any set, use find_first_bit again. That's mainly to | |
552 | * accelerate the case where we wrapped, so we have some at | |
553 | * the beginning, and some at the end, and a big gap | |
554 | * in the middle. | |
555 | */ | |
556 | if (!test_bit(tid, tidmap)) | |
557 | continue; | |
558 | cnt++; | |
559 | if (dd->ipath_pageshadow[porttid + tid]) { | |
3ac8c70f DO |
560 | struct page *p; |
561 | p = dd->ipath_pageshadow[porttid + tid]; | |
562 | dd->ipath_pageshadow[porttid + tid] = NULL; | |
7f510b46 | 563 | ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n", |
40d97692 | 564 | pid_nr(pd->port_pid), tid); |
f716cdfe JE |
565 | dd->ipath_f_put_tid(dd, &tidbase[tid], |
566 | RCVHQ_RCV_TYPE_EXPECTED, | |
7f510b46 | 567 | dd->ipath_tidinvalid); |
1fd3b40f BS |
568 | pci_unmap_page(dd->pcidev, |
569 | dd->ipath_physshadow[porttid + tid], | |
570 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | |
3ac8c70f | 571 | ipath_release_user_pages(&p, 1); |
7f510b46 BS |
572 | ipath_stats.sps_pageunlocks++; |
573 | } else | |
574 | ipath_dbg("Unused tid %u, ignoring\n", tid); | |
575 | } | |
576 | if (cnt != ti->tidcnt) | |
577 | ipath_dbg("passed in tidcnt %d, only %d bits set in map\n", | |
578 | ti->tidcnt, cnt); | |
579 | done: | |
580 | if (ret) | |
581 | ipath_dbg("Failed to unmap %u TID pages, failing with %d\n", | |
582 | ti->tidcnt, -ret); | |
583 | return ret; | |
584 | } | |
585 | ||
586 | /** | |
587 | * ipath_set_part_key - set a partition key | |
588 | * @pd: the port | |
589 | * @key: the key | |
590 | * | |
591 | * We can have up to 4 active at a time (other than the default, which is | |
592 | * always allowed). This is somewhat tricky, since multiple ports may set | |
593 | * the same key, so we reference count them, and clean up at exit. All 4 | |
594 | * partition keys are packed into a single infinipath register. It's an | |
595 | * error for a process to set the same pkey multiple times. We provide no | |
596 | * mechanism to de-allocate a pkey at this time; we may eventually need to | |
597 | * do that. I've used the atomic operations, and no locking, and only make | |
598 | * a single pass through what's available. This should be more than | |
599 | * adequate for some time. I'll think about spinlocks or the like if and as | |
600 | * it's necessary. | |
601 | */ | |
602 | static int ipath_set_part_key(struct ipath_portdata *pd, u16 key) | |
603 | { | |
604 | struct ipath_devdata *dd = pd->port_dd; | |
605 | int i, any = 0, pidx = -1; | |
606 | u16 lkey = key & 0x7FFF; | |
607 | int ret; | |
608 | ||
27b678dd | 609 | if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) { |
7f510b46 BS |
610 | /* nothing to do; this key always valid */ |
611 | ret = 0; | |
612 | goto bail; | |
613 | } | |
614 | ||
615 | ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys " | |
616 | "%hx:%x %hx:%x %hx:%x %hx:%x\n", | |
617 | pd->port_port, key, dd->ipath_pkeys[0], | |
618 | atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1], | |
619 | atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2], | |
620 | atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3], | |
621 | atomic_read(&dd->ipath_pkeyrefs[3])); | |
622 | ||
623 | if (!lkey) { | |
624 | ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n", | |
625 | pd->port_port); | |
626 | ret = -EINVAL; | |
627 | goto bail; | |
628 | } | |
629 | ||
630 | /* | |
631 | * Set the full membership bit, because it has to be | |
632 | * set in the register or the packet, and it seems | |
633 | * cleaner to set in the register than to force all | |
634 | * callers to set it. (see bug 4331) | |
635 | */ | |
636 | key |= 0x8000; | |
637 | ||
638 | for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { | |
639 | if (!pd->port_pkeys[i] && pidx == -1) | |
640 | pidx = i; | |
641 | if (pd->port_pkeys[i] == key) { | |
642 | ipath_cdbg(VERBOSE, "p%u tries to set same pkey " | |
643 | "(%x) more than once\n", | |
644 | pd->port_port, key); | |
645 | ret = -EEXIST; | |
646 | goto bail; | |
647 | } | |
648 | } | |
649 | if (pidx == -1) { | |
650 | ipath_dbg("All pkeys for port %u already in use, " | |
651 | "can't set %x\n", pd->port_port, key); | |
652 | ret = -EBUSY; | |
653 | goto bail; | |
654 | } | |
655 | for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | |
656 | if (!dd->ipath_pkeys[i]) { | |
657 | any++; | |
658 | continue; | |
659 | } | |
660 | if (dd->ipath_pkeys[i] == key) { | |
661 | atomic_t *pkrefs = &dd->ipath_pkeyrefs[i]; | |
662 | ||
663 | if (atomic_inc_return(pkrefs) > 1) { | |
664 | pd->port_pkeys[pidx] = key; | |
665 | ipath_cdbg(VERBOSE, "p%u set key %x " | |
666 | "matches #%d, count now %d\n", | |
667 | pd->port_port, key, i, | |
668 | atomic_read(pkrefs)); | |
669 | ret = 0; | |
670 | goto bail; | |
671 | } else { | |
672 | /* | |
673 | * lost race, decrement count, catch below | |
674 | */ | |
675 | atomic_dec(pkrefs); | |
676 | ipath_cdbg(VERBOSE, "Lost race, count was " | |
677 | "0, after dec, it's %d\n", | |
678 | atomic_read(pkrefs)); | |
679 | any++; | |
680 | } | |
681 | } | |
682 | if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { | |
683 | /* | |
684 | * It makes no sense to have both the limited and | |
685 | * full membership PKEY set at the same time since | |
686 | * the unlimited one will disable the limited one. | |
687 | */ | |
688 | ret = -EEXIST; | |
689 | goto bail; | |
690 | } | |
691 | } | |
692 | if (!any) { | |
693 | ipath_dbg("port %u, all pkeys already in use, " | |
694 | "can't set %x\n", pd->port_port, key); | |
695 | ret = -EBUSY; | |
696 | goto bail; | |
697 | } | |
698 | for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | |
699 | if (!dd->ipath_pkeys[i] && | |
700 | atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { | |
701 | u64 pkey; | |
702 | ||
703 | /* for ipathstats, etc. */ | |
704 | ipath_stats.sps_pkeys[i] = lkey; | |
705 | pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key; | |
706 | pkey = | |
707 | (u64) dd->ipath_pkeys[0] | | |
708 | ((u64) dd->ipath_pkeys[1] << 16) | | |
709 | ((u64) dd->ipath_pkeys[2] << 32) | | |
710 | ((u64) dd->ipath_pkeys[3] << 48); | |
711 | ipath_cdbg(PROC, "p%u set key %x in #%d, " | |
712 | "portidx %d, new pkey reg %llx\n", | |
713 | pd->port_port, key, i, pidx, | |
714 | (unsigned long long) pkey); | |
715 | ipath_write_kreg( | |
716 | dd, dd->ipath_kregs->kr_partitionkey, pkey); | |
717 | ||
718 | ret = 0; | |
719 | goto bail; | |
720 | } | |
721 | } | |
722 | ipath_dbg("port %u, all pkeys already in use 2nd pass, " | |
723 | "can't set %x\n", pd->port_port, key); | |
724 | ret = -EBUSY; | |
725 | ||
726 | bail: | |
727 | return ret; | |
728 | } | |
729 | ||
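/*
 * Sketch of how the four per-unit pkeys above share the single
 * kr_partitionkey register: 16 bits each, pkeys[0] in the low bits.
 * The 0x8000 "full membership" bit has already been set on the key
 * before it is stored, as noted above.  Illustrative only; the
 * driver builds this value inline.
 */
static u64 example_pack_pkeys(const u16 pkeys[4])
{
	return (u64) pkeys[0] |
	       ((u64) pkeys[1] << 16) |
	       ((u64) pkeys[2] << 32) |
	       ((u64) pkeys[3] << 48);
}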
730 | /** | |
731 | * ipath_manage_rcvq - manage a port's receive queue | |
732 | * @pd: the port | |
9929b0fb | 733 | * @subport: the subport |
7f510b46 BS |
734 | * @start_stop: action to carry out |
735 | * | |
736 | * start_stop == 0 disables receive on the port, for use in queue | |
737 | * overflow conditions. start_stop==1 re-enables, to be used to | |
738 | * re-init the software copy of the head register | |
739 | */ | |
9929b0fb BS |
740 | static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, |
741 | int start_stop) | |
7f510b46 BS |
742 | { |
743 | struct ipath_devdata *dd = pd->port_dd; | |
7f510b46 | 744 | |
9929b0fb | 745 | ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", |
7f510b46 | 746 | start_stop ? "en" : "dis", dd->ipath_unit, |
9929b0fb BS |
747 | pd->port_port, subport); |
748 | if (subport) | |
749 | goto bail; | |
7f510b46 BS |
750 | /* atomically set or clear the receive enable bit for this port. */ |
751 | if (start_stop) { | |
752 | /* | |
753 | * On enable, force in-memory copy of the tail register to | |
754 | * 0, so that protocol code doesn't have to worry about | |
755 | * whether or not the chip has yet updated the in-memory | |
756 | * copy or not on return from the system call. The chip | |
757 | * always resets its tail register back to 0 on a | |
758 | * transition from disabled to enabled. This could cause a | |
759 | * problem if software was broken, and did the enable w/o | |
760 | * the disable, but eventually the in-memory copy will be | |
761 | * updated and correct itself, even in the face of software | |
762 | * bugs. | |
763 | */ | |
c59a80ac RC |
764 | if (pd->port_rcvhdrtail_kvaddr) |
765 | ipath_clear_rcvhdrtail(pd); | |
d8274869 | 766 | set_bit(dd->ipath_r_portenable_shift + pd->port_port, |
7f510b46 BS |
767 | &dd->ipath_rcvctrl); |
768 | } else | |
d8274869 | 769 | clear_bit(dd->ipath_r_portenable_shift + pd->port_port, |
7f510b46 BS |
770 | &dd->ipath_rcvctrl); |
771 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | |
772 | dd->ipath_rcvctrl); | |
773 | /* now be sure chip saw it before we return */ | |
44f8e3f3 | 774 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); |
7f510b46 BS |
775 | if (start_stop) { |
776 | /* | |
777 | * And try to be sure that tail reg update has happened too. | |
778 | * This should in theory interlock with the RXE changes to | |
779 | * the tail register. Don't assign it to the tail register | |
780 | * in memory copy, since we could overwrite an update by the | |
781 | * chip if we did. | |
782 | */ | |
44f8e3f3 | 783 | ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); |
7f510b46 BS |
784 | } |
785 | /* always; new head should be equal to new tail; see above */ | |
9929b0fb | 786 | bail: |
7f510b46 BS |
787 | return 0; |
788 | } | |
789 | ||
790 | static void ipath_clean_part_key(struct ipath_portdata *pd, | |
791 | struct ipath_devdata *dd) | |
792 | { | |
793 | int i, j, pchanged = 0; | |
794 | u64 oldpkey; | |
795 | ||
796 | /* for debugging only */ | |
797 | oldpkey = (u64) dd->ipath_pkeys[0] | | |
798 | ((u64) dd->ipath_pkeys[1] << 16) | | |
799 | ((u64) dd->ipath_pkeys[2] << 32) | | |
800 | ((u64) dd->ipath_pkeys[3] << 48); | |
801 | ||
802 | for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { | |
803 | if (!pd->port_pkeys[i]) | |
804 | continue; | |
805 | ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i, | |
806 | pd->port_pkeys[i]); | |
807 | for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) { | |
808 | /* check for match independent of the global bit */ | |
809 | if ((dd->ipath_pkeys[j] & 0x7fff) != | |
810 | (pd->port_pkeys[i] & 0x7fff)) | |
811 | continue; | |
812 | if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) { | |
813 | ipath_cdbg(VERBOSE, "p%u clear key " | |
814 | "%x matches #%d\n", | |
815 | pd->port_port, | |
816 | pd->port_pkeys[i], j); | |
817 | ipath_stats.sps_pkeys[j] = | |
818 | dd->ipath_pkeys[j] = 0; | |
819 | pchanged++; | |
820 | } | |
821 | else ipath_cdbg( | |
822 | VERBOSE, "p%u key %x matches #%d, " | |
823 | "but ref still %d\n", pd->port_port, | |
824 | pd->port_pkeys[i], j, | |
825 | atomic_read(&dd->ipath_pkeyrefs[j])); | |
826 | break; | |
827 | } | |
828 | pd->port_pkeys[i] = 0; | |
829 | } | |
830 | if (pchanged) { | |
831 | u64 pkey = (u64) dd->ipath_pkeys[0] | | |
832 | ((u64) dd->ipath_pkeys[1] << 16) | | |
833 | ((u64) dd->ipath_pkeys[2] << 32) | | |
834 | ((u64) dd->ipath_pkeys[3] << 48); | |
835 | ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, " | |
836 | "new pkey reg %llx\n", pd->port_port, | |
837 | (unsigned long long) oldpkey, | |
838 | (unsigned long long) pkey); | |
839 | ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, | |
840 | pkey); | |
841 | } | |
842 | } | |
843 | ||
9929b0fb BS |
844 | /* |
845 | * Initialize the port data with the receive buffer sizes | |
846 | * so this can be done while the master port is locked. | |
847 | * Otherwise, there is a race with a slave opening the port | |
848 | * and seeing these fields uninitialized. | |
849 | */ | |
850 | static void init_user_egr_sizes(struct ipath_portdata *pd) | |
851 | { | |
852 | struct ipath_devdata *dd = pd->port_dd; | |
853 | unsigned egrperchunk, egrcnt, size; | |
854 | ||
855 | /* | |
856 | * to avoid wasting a lot of memory, we allocate 32KB chunks of | |
857 | * physically contiguous memory, advance through it until used up | |
858 | * and then allocate more. Of course, we need memory to store those | |
859 | * extra pointers, now. Started out with 256KB, but under heavy | |
860 | * memory pressure (creating large files and then copying them over | |
861 | * NFS while doing lots of MPI jobs), we hit some allocation | |
862 | * failures, even though we can sleep... (2.6.10). Still get | |
863 | * failures at 64K. 32K is the lowest we can go without wasting | |
864 | * additional memory. | |
865 | */ | |
866 | size = 0x8000; | |
867 | egrperchunk = size / dd->ipath_rcvegrbufsize; | |
868 | egrcnt = dd->ipath_rcvegrcnt; | |
869 | pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk; | |
870 | pd->port_rcvegrbufs_perchunk = egrperchunk; | |
871 | pd->port_rcvegrbuf_size = size; | |
872 | } | |
873 | ||
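/*
 * Worked form of the sizing above (illustrative, not called by the
 * driver): with 32KB chunks, each chunk holds
 * 0x8000 / ipath_rcvegrbufsize buffers, and the chunk count is the
 * eager-TID count divided by that, rounded up.  For example, with a
 * hypothetical 2KB eager buffer size and 100 eager TIDs this gives
 * 16 buffers per chunk and 7 chunks.
 */
static unsigned example_egrbuf_chunks(unsigned egrcnt, unsigned egrbufsize)
{
	unsigned perchunk = 0x8000 / egrbufsize;	/* 32KB chunks */

	return (egrcnt + perchunk - 1) / perchunk;	/* round up */
}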
7f510b46 BS |
874 | /** |
875 | * ipath_create_user_egr - allocate eager TID buffers | |
876 | * @pd: the port to allocate TID buffers for | |
877 | * | |
878 | * This routine is now quite different for user and kernel, because | |
879 | * the kernel uses skb's for the accelerated network performance. | |
880 | * This is the user port version. | |
881 | * | |
882 | * Allocate the eager TID buffers and program them into infinipath | |
883 | * They are no longer completely contiguous; we do multiple allocation | |
884 | * calls. | |
885 | */ | |
886 | static int ipath_create_user_egr(struct ipath_portdata *pd) | |
887 | { | |
888 | struct ipath_devdata *dd = pd->port_dd; | |
9929b0fb | 889 | unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; |
7f510b46 BS |
890 | size_t size; |
891 | int ret; | |
0ed9a4a0 BS |
892 | gfp_t gfp_flags; |
893 | ||
894 | /* | |
895 | * GFP_USER, but without GFP_FS, so buffer cache can be | |
896 | * coalesced (we hope); otherwise, even at order 4, | |
897 | * heavy filesystem activity makes these fail, and we can | |
898 | * use compound pages. | |
899 | */ | |
900 | gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; | |
7f510b46 BS |
901 | |
902 | egrcnt = dd->ipath_rcvegrcnt; | |
903 | /* TID number offset for this port */ | |
60948a41 | 904 | egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt; |
7f510b46 BS |
905 | egrsize = dd->ipath_rcvegrbufsize; |
906 | ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " | |
907 | "offset %x, egrsize %u\n", egrcnt, egroff, egrsize); | |
908 | ||
9929b0fb BS |
909 | chunk = pd->port_rcvegrbuf_chunks; |
910 | egrperchunk = pd->port_rcvegrbufs_perchunk; | |
911 | size = pd->port_rcvegrbuf_size; | |
912 | pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]), | |
913 | GFP_KERNEL); | |
7f510b46 BS |
914 | if (!pd->port_rcvegrbuf) { |
915 | ret = -ENOMEM; | |
916 | goto bail; | |
917 | } | |
918 | pd->port_rcvegrbuf_phys = | |
9929b0fb BS |
919 | kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]), |
920 | GFP_KERNEL); | |
7f510b46 BS |
921 | if (!pd->port_rcvegrbuf_phys) { |
922 | ret = -ENOMEM; | |
923 | goto bail_rcvegrbuf; | |
924 | } | |
925 | for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) { | |
7f510b46 BS |
926 | |
927 | pd->port_rcvegrbuf[e] = dma_alloc_coherent( | |
928 | &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e], | |
929 | gfp_flags); | |
930 | ||
931 | if (!pd->port_rcvegrbuf[e]) { | |
932 | ret = -ENOMEM; | |
933 | goto bail_rcvegrbuf_phys; | |
934 | } | |
935 | } | |
936 | ||
937 | pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0]; | |
938 | ||
939 | for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) { | |
940 | dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk]; | |
941 | unsigned i; | |
942 | ||
943 | for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { | |
944 | dd->ipath_f_put_tid(dd, e + egroff + | |
945 | (u64 __iomem *) | |
946 | ((char __iomem *) | |
947 | dd->ipath_kregbase + | |
f716cdfe JE |
948 | dd->ipath_rcvegrbase), |
949 | RCVHQ_RCV_TYPE_EAGER, pa); | |
7f510b46 BS |
950 | pa += egrsize; |
951 | } | |
952 | cond_resched(); /* don't hog the cpu */ | |
953 | } | |
954 | ||
955 | ret = 0; | |
956 | goto bail; | |
957 | ||
958 | bail_rcvegrbuf_phys: | |
959 | for (e = 0; e < pd->port_rcvegrbuf_chunks && | |
f37bda92 | 960 | pd->port_rcvegrbuf[e]; e++) { |
7f510b46 BS |
961 | dma_free_coherent(&dd->pcidev->dev, size, |
962 | pd->port_rcvegrbuf[e], | |
963 | pd->port_rcvegrbuf_phys[e]); | |
964 | ||
f37bda92 | 965 | } |
9929b0fb | 966 | kfree(pd->port_rcvegrbuf_phys); |
7f510b46 BS |
967 | pd->port_rcvegrbuf_phys = NULL; |
968 | bail_rcvegrbuf: | |
9929b0fb | 969 | kfree(pd->port_rcvegrbuf); |
7f510b46 BS |
970 | pd->port_rcvegrbuf = NULL; |
971 | bail: | |
972 | return ret; | |
973 | } | |
974 | ||
f37bda92 BS |
975 | |
976 | /* common code for the mappings on dma_alloc_coherent mem */ | |
977 | static int ipath_mmap_mem(struct vm_area_struct *vma, | |
1fd3b40f BS |
978 | struct ipath_portdata *pd, unsigned len, int write_ok, |
979 | void *kvaddr, char *what) | |
f37bda92 BS |
980 | { |
981 | struct ipath_devdata *dd = pd->port_dd; | |
1fd3b40f | 982 | unsigned long pfn; |
f37bda92 BS |
983 | int ret; |
984 | ||
985 | if ((vma->vm_end - vma->vm_start) > len) { | |
986 | dev_info(&dd->pcidev->dev, | |
987 | "FAIL on %s: len %lx > %x\n", what, | |
988 | vma->vm_end - vma->vm_start, len); | |
989 | ret = -EFAULT; | |
990 | goto bail; | |
991 | } | |
992 | ||
993 | if (!write_ok) { | |
994 | if (vma->vm_flags & VM_WRITE) { | |
995 | dev_info(&dd->pcidev->dev, | |
996 | "%s must be mapped readonly\n", what); | |
997 | ret = -EPERM; | |
998 | goto bail; | |
999 | } | |
1000 | ||
1001 | /* don't allow them to later change with mprotect */ | |
1002 | vma->vm_flags &= ~VM_MAYWRITE; | |
1003 | } | |
1004 | ||
1fd3b40f | 1005 | pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; |
f37bda92 BS |
1006 | ret = remap_pfn_range(vma, vma->vm_start, pfn, |
1007 | len, vma->vm_page_prot); | |
1008 | if (ret) | |
1fd3b40f BS |
1009 | dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x " |
1010 | "bytes r%c failed: %d\n", what, pd->port_port, | |
1011 | pfn, len, write_ok?'w':'o', ret); | |
f37bda92 | 1012 | else |
1fd3b40f BS |
1013 | ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes " |
1014 | "r%c\n", what, pd->port_port, pfn, len, | |
1015 | write_ok?'w':'o'); | |
f37bda92 BS |
1016 | bail: |
1017 | return ret; | |
1018 | } | |
1019 | ||
7f510b46 BS |
1020 | static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd, |
1021 | u64 ureg) | |
1022 | { | |
1023 | unsigned long phys; | |
1024 | int ret; | |
1025 | ||
f37bda92 BS |
1026 | /* |
1027 | * This is real hardware, so use io_remap. This is the mechanism | |
1028 | * for the user process to update the head registers for their port | |
1029 | * in the chip. | |
1030 | */ | |
7f510b46 BS |
1031 | if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { |
1032 | dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen " | |
1033 | "%lx > PAGE\n", vma->vm_end - vma->vm_start); | |
1034 | ret = -EFAULT; | |
1035 | } else { | |
1036 | phys = dd->ipath_physaddr + ureg; | |
1037 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | |
1038 | ||
1039 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | |
1040 | ret = io_remap_pfn_range(vma, vma->vm_start, | |
1041 | phys >> PAGE_SHIFT, | |
1042 | vma->vm_end - vma->vm_start, | |
1043 | vma->vm_page_prot); | |
1044 | } | |
1045 | return ret; | |
1046 | } | |
1047 | ||
1048 | static int mmap_piobufs(struct vm_area_struct *vma, | |
1049 | struct ipath_devdata *dd, | |
9929b0fb BS |
1050 | struct ipath_portdata *pd, |
1051 | unsigned piobufs, unsigned piocnt) | |
7f510b46 BS |
1052 | { |
1053 | unsigned long phys; | |
1054 | int ret; | |
1055 | ||
1056 | /* | |
f37bda92 BS |
1057 | * When we map the PIO buffers in the chip, we want to map them as |
1058 | * writeonly, no read possible. This prevents access to previous | |
1059 | * process data, and catches users who might try to read the i/o | |
1060 | * space due to a bug. | |
7f510b46 | 1061 | */ |
9929b0fb | 1062 | if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) { |
7f510b46 BS |
1063 | dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " |
1064 | "reqlen %lx > PAGE\n", | |
1065 | vma->vm_end - vma->vm_start); | |
9929b0fb | 1066 | ret = -EINVAL; |
7f510b46 BS |
1067 | goto bail; |
1068 | } | |
1069 | ||
9929b0fb | 1070 | phys = dd->ipath_physaddr + piobufs; |
f37bda92 | 1071 | |
eb9dc6f4 BS |
1072 | #if defined(__powerpc__) |
1073 | /* There isn't a generic way to specify writethrough mappings */ | |
1074 | pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; | |
1075 | pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; | |
1076 | pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; | |
1077 | #endif | |
1078 | ||
367fe711 BS |
1079 | /* |
1080 | * don't allow them to later change to readable with mprotect (for when | |
1081 | * not initially mapped readable, as is normally the case) | |
1082 | */ | |
f37bda92 | 1083 | vma->vm_flags &= ~VM_MAYREAD; |
7f510b46 BS |
1084 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; |
1085 | ||
1086 | ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, | |
1087 | vma->vm_end - vma->vm_start, | |
1088 | vma->vm_page_prot); | |
1089 | bail: | |
1090 | return ret; | |
1091 | } | |
1092 | ||
1093 | static int mmap_rcvegrbufs(struct vm_area_struct *vma, | |
1094 | struct ipath_portdata *pd) | |
1095 | { | |
1096 | struct ipath_devdata *dd = pd->port_dd; | |
1097 | unsigned long start, size; | |
1098 | size_t total_size, i; | |
1fd3b40f | 1099 | unsigned long pfn; |
7f510b46 BS |
1100 | int ret; |
1101 | ||
7f510b46 BS |
1102 | size = pd->port_rcvegrbuf_size; |
1103 | total_size = pd->port_rcvegrbuf_chunks * size; | |
1104 | if ((vma->vm_end - vma->vm_start) > total_size) { | |
1105 | dev_info(&dd->pcidev->dev, "FAIL on egr bufs: " | |
1106 | "reqlen %lx > actual %lx\n", | |
1107 | vma->vm_end - vma->vm_start, | |
1108 | (unsigned long) total_size); | |
9929b0fb | 1109 | ret = -EINVAL; |
7f510b46 BS |
1110 | goto bail; |
1111 | } | |
1112 | ||
1113 | if (vma->vm_flags & VM_WRITE) { | |
1114 | dev_info(&dd->pcidev->dev, "Can't map eager buffers as " | |
1115 | "writable (flags=%lx)\n", vma->vm_flags); | |
1116 | ret = -EPERM; | |
1117 | goto bail; | |
1118 | } | |
f37bda92 BS |
1119 | /* don't allow them to later change to writeable with mprotect */ |
1120 | vma->vm_flags &= ~VM_MAYWRITE; | |
7f510b46 BS |
1121 | |
1122 | start = vma->vm_start; | |
7f510b46 | 1123 | |
7f510b46 | 1124 | for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { |
1fd3b40f BS |
1125 | pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT; |
1126 | ret = remap_pfn_range(vma, start, pfn, size, | |
1127 | vma->vm_page_prot); | |
7f510b46 BS |
1128 | if (ret < 0) |
1129 | goto bail; | |
1130 | } | |
1131 | ret = 0; | |
1132 | ||
1133 | bail: | |
1134 | return ret; | |
1135 | } | |
1136 | ||
9929b0fb | 1137 | /* |
3c845086 | 1138 | * ipath_file_vma_fault - handle a VMA page fault. |
9929b0fb | 1139 | */ |
3c845086 NP |
1140 | static int ipath_file_vma_fault(struct vm_area_struct *vma, |
1141 | struct vm_fault *vmf) | |
9929b0fb | 1142 | { |
3c845086 | 1143 | struct page *page; |
9929b0fb | 1144 | |
3c845086 | 1145 | page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); |
9929b0fb | 1146 | if (!page) |
3c845086 | 1147 | return VM_FAULT_SIGBUS; |
9929b0fb | 1148 | get_page(page); |
3c845086 NP |
1149 | vmf->page = page; |
1150 | ||
1151 | return 0; | |
9929b0fb BS |
1152 | } |
1153 | ||
1154 | static struct vm_operations_struct ipath_file_vm_ops = { | |
3c845086 | 1155 | .fault = ipath_file_vma_fault, |
9929b0fb BS |
1156 | }; |
1157 | ||
1158 | static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, | |
1159 | struct ipath_portdata *pd, unsigned subport) | |
1160 | { | |
1161 | unsigned long len; | |
1162 | struct ipath_devdata *dd; | |
1163 | void *addr; | |
1164 | size_t size; | |
0a5a83cf | 1165 | int ret = 0; |
9929b0fb BS |
1166 | |
1167 | /* If the port is not shared, all addresses should be physical */ | |
0a5a83cf | 1168 | if (!pd->port_subport_cnt) |
9929b0fb | 1169 | goto bail; |
9929b0fb BS |
1170 | |
1171 | dd = pd->port_dd; | |
1172 | size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; | |
1173 | ||
1174 | /* | |
c7e29ff1 MD |
1175 | * Each process has all the subport uregbase, rcvhdrq, and |
1176 | * rcvegrbufs mmapped - as an array for all the processes, | |
1177 | * and also separately for this process. | |
9929b0fb | 1178 | */ |
c7e29ff1 MD |
1179 | if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) { |
1180 | addr = pd->subport_uregbase; | |
1181 | size = PAGE_SIZE * pd->port_subport_cnt; | |
1182 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) { | |
1183 | addr = pd->subport_rcvhdr_base; | |
1184 | size = pd->port_rcvhdrq_size * pd->port_subport_cnt; | |
1185 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) { | |
1186 | addr = pd->subport_rcvegrbuf; | |
1187 | size *= pd->port_subport_cnt; | |
1188 | } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase + | |
1189 | PAGE_SIZE * subport)) { | |
1190 | addr = pd->subport_uregbase + PAGE_SIZE * subport; | |
1191 | size = PAGE_SIZE; | |
1192 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base + | |
1193 | pd->port_rcvhdrq_size * subport)) { | |
1194 | addr = pd->subport_rcvhdr_base + | |
1195 | pd->port_rcvhdrq_size * subport; | |
1196 | size = pd->port_rcvhdrq_size; | |
1197 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf + | |
1198 | size * subport)) { | |
1199 | addr = pd->subport_rcvegrbuf + size * subport; | |
1200 | /* rcvegrbufs are read-only on the slave */ | |
1201 | if (vma->vm_flags & VM_WRITE) { | |
1202 | dev_info(&dd->pcidev->dev, | |
1203 | "Can't map eager buffers as " | |
1204 | "writable (flags=%lx)\n", vma->vm_flags); | |
1205 | ret = -EPERM; | |
1206 | goto bail; | |
1207 | } | |
1208 | /* | |
1209 | * Don't allow permission to later change to writeable | |
1210 | * with mprotect. | |
1211 | */ | |
1212 | vma->vm_flags &= ~VM_MAYWRITE; | |
1213 | } else { | |
9929b0fb | 1214 | goto bail; |
c7e29ff1 | 1215 | } |
9929b0fb BS |
1216 | len = vma->vm_end - vma->vm_start; |
1217 | if (len > size) { | |
1218 | ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size); | |
1219 | ret = -EINVAL; | |
1220 | goto bail; | |
1221 | } | |
1222 | ||
1223 | vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; | |
1224 | vma->vm_ops = &ipath_file_vm_ops; | |
1225 | vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; | |
0a5a83cf | 1226 | ret = 1; |
9929b0fb BS |
1227 | |
1228 | bail: | |
1229 | return ret; | |
1230 | } | |
1231 | ||
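/*
 * Sketch of the address matching done in mmap_kvaddr() above: the
 * page offset passed by the user is a pseudo-physical address
 * produced by cvt_kvaddr(), naming either a whole shared region or
 * one subport's slice of it.  Illustrative only; the driver does
 * these comparisons inline.
 */
static int example_is_subport_slice(u64 pgaddr, void *base,
				    size_t per_subport, unsigned subport)
{
	return pgaddr == cvt_kvaddr((char *) base + per_subport * subport);
}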
7f510b46 BS |
1232 | /** |
1233 | * ipath_mmap - mmap various structures into user space | |
1234 | * @fp: the file pointer | |
1235 | * @vma: the VM area | |
1236 | * | |
1237 | * We use this to have a shared buffer between the kernel and the user code | |
1238 | * for the rcvhdr queue, egr buffers, and the per-port user regs and pio | |
1239 | * buffers in the chip. We have the open and close entries so we can bump | |
1240 | * the ref count and keep the driver from being unloaded while still mapped. | |
1241 | */ | |
1242 | static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) | |
1243 | { | |
1244 | struct ipath_portdata *pd; | |
1245 | struct ipath_devdata *dd; | |
1246 | u64 pgaddr, ureg; | |
9929b0fb | 1247 | unsigned piobufs, piocnt; |
7f510b46 BS |
1248 | int ret; |
1249 | ||
1250 | pd = port_fp(fp); | |
9929b0fb BS |
1251 | if (!pd) { |
1252 | ret = -EINVAL; | |
1253 | goto bail; | |
1254 | } | |
7f510b46 | 1255 | dd = pd->port_dd; |
f37bda92 | 1256 | |
7f510b46 BS |
1257 | /* |
1258 | * This is the ipath_do_user_init() code, mapping the shared buffers | |
1259 | * into the user process. The address referred to by vm_pgoff is the | |
9929b0fb BS |
1260 | * file offset passed via mmap(). For shared ports, this is the |
1261 | * kernel vmalloc() address of the pages to share with the master. | |
1262 | * For non-shared or master ports, this is a physical address. | |
1263 | * We only do one mmap for each space mapped. | |
7f510b46 BS |
1264 | */ |
1265 | pgaddr = vma->vm_pgoff << PAGE_SHIFT; | |
1266 | ||
1267 | /* | |
9929b0fb BS |
1268 | * Check for 0 in case one of the allocations failed, but user |
1269 | * called mmap anyway. | |
7f510b46 | 1270 | */ |
9929b0fb BS |
1271 | if (!pgaddr) { |
1272 | ret = -EINVAL; | |
1273 | goto bail; | |
f37bda92 | 1274 | } |
7f510b46 | 1275 | |
9929b0fb | 1276 | ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n", |
7f510b46 | 1277 | (unsigned long long) pgaddr, vma->vm_start, |
9929b0fb BS |
1278 | vma->vm_end - vma->vm_start, dd->ipath_unit, |
1279 | pd->port_port, subport_fp(fp)); | |
7f510b46 | 1280 | |
9929b0fb BS |
1281 | /* |
1282 | * Physical addresses must fit in 40 bits for our hardware. | |
1283 | * Check for kernel virtual addresses first, anything else must | |
1284 | * match a HW or memory address. | |
1285 | */ | |
0a5a83cf RC |
1286 | ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); |
1287 | if (ret) { | |
1288 | if (ret > 0) | |
1289 | ret = 0; | |
9929b0fb BS |
1290 | goto bail; |
1291 | } | |
1292 | ||
a18e26ae | 1293 | ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; |
9929b0fb BS |
1294 | if (!pd->port_subport_cnt) { |
1295 | /* port is not shared */ | |
e2ab41ca | 1296 | piocnt = pd->port_piocnt; |
9929b0fb BS |
1297 | piobufs = pd->port_piobufs; |
1298 | } else if (!subport_fp(fp)) { | |
1299 | /* caller is the master */ | |
e2ab41ca DO |
1300 | piocnt = (pd->port_piocnt / pd->port_subport_cnt) + |
1301 | (pd->port_piocnt % pd->port_subport_cnt); | |
9929b0fb | 1302 | piobufs = pd->port_piobufs + |
e2ab41ca | 1303 | dd->ipath_palign * (pd->port_piocnt - piocnt); |
9929b0fb BS |
1304 | } else { |
1305 | unsigned slave = subport_fp(fp) - 1; | |
1306 | ||
1307 | /* caller is a slave */ | |
e2ab41ca | 1308 | piocnt = pd->port_piocnt / pd->port_subport_cnt; |
9929b0fb | 1309 | piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; |
f37bda92 | 1310 | } |
9929b0fb BS |
1311 | |
1312 | if (pgaddr == ureg) | |
7f510b46 | 1313 | ret = mmap_ureg(vma, dd, ureg); |
9929b0fb BS |
1314 | else if (pgaddr == piobufs) |
1315 | ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt); | |
1316 | else if (pgaddr == dd->ipath_pioavailregs_phys) | |
1317 | /* in-memory copy of pioavail registers */ | |
1318 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, | |
1fd3b40f | 1319 | (void *) dd->ipath_pioavailregs_dma, |
9929b0fb | 1320 | "pioavail registers"); |
9929b0fb | 1321 | else if (pgaddr == pd->port_rcvegr_phys) |
7f510b46 | 1322 | ret = mmap_rcvegrbufs(vma, pd); |
9929b0fb | 1323 | else if (pgaddr == (u64) pd->port_rcvhdrq_phys) |
f37bda92 | 1324 | /* |
525d0ca1 | 1325 | * The rcvhdrq itself; readonly except on HT (so have |
f37bda92 BS |
1326 | * to allow writable mapping), multiple pages, contiguous |
1327 | * from an i/o perspective. | |
1328 | */ | |
9929b0fb | 1329 | ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1, |
1fd3b40f | 1330 | pd->port_rcvhdrq, |
f37bda92 | 1331 | "rcvhdrq"); |
9929b0fb | 1332 | else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys) |
f37bda92 BS |
1333 | /* in-memory copy of rcvhdrq tail register */ |
1334 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, | |
1fd3b40f | 1335 | pd->port_rcvhdrtail_kvaddr, |
f37bda92 | 1336 | "rcvhdrq tail"); |
7f510b46 BS |
1337 | else |
1338 | ret = -EINVAL; | |
1339 | ||
1340 | vma->vm_private_data = NULL; | |
1341 | ||
1342 | if (ret < 0) | |
1343 | dev_info(&dd->pcidev->dev, | |
9929b0fb BS |
1344 | "Failure %d on off %llx len %lx\n", |
1345 | -ret, (unsigned long long)pgaddr, | |
1346 | vma->vm_end - vma->vm_start); | |
1347 | bail: | |
7f510b46 BS |
1348 | return ret; |
1349 | } | |
1350 | ||
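/*
 * Userspace-side sketch of how the addresses returned in struct
 * ipath_base_info are consumed (illustrative; 'fd' and 'binfo' are
 * assumed names for the open device fd and the structure filled in
 * by ipath_get_base_info() above).  Each spi_* value is passed back
 * verbatim as the mmap() offset, which ipath_mmap() dispatches on.
 */
#if 0	/* example only, never built */
	uregs = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		     fd, (off_t) binfo.__spi_uregbase);
	piobufs = mmap(NULL, binfo.spi_piocnt * binfo.spi_pioalign,
		       PROT_WRITE, MAP_SHARED, fd,
		       (off_t) binfo.spi_piobufbase);
#endif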
70c51da2 AJ |
1351 | static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd) |
1352 | { | |
1353 | unsigned pollflag = 0; | |
1354 | ||
1355 | if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) && | |
1356 | pd->port_hdrqfull != pd->port_hdrqfull_poll) { | |
1357 | pollflag |= POLLIN | POLLRDNORM; | |
1358 | pd->port_hdrqfull_poll = pd->port_hdrqfull; | |
1359 | } | |
1360 | ||
1361 | return pollflag; | |
1362 | } | |
1363 | ||
f2d04231 RW |
1364 | static unsigned int ipath_poll_urgent(struct ipath_portdata *pd, |
1365 | struct file *fp, | |
1366 | struct poll_table_struct *pt) | |
7f510b46 | 1367 | { |
e35d710d | 1368 | unsigned pollflag = 0; |
7f510b46 BS |
1369 | struct ipath_devdata *dd; |
1370 | ||
7f510b46 BS |
1371 | dd = pd->port_dd; |
1372 | ||
70c51da2 AJ |
1373 | /* variable access in ipath_poll_hdrqfull() needs this */ |
1374 | rmb(); | |
1375 | pollflag = ipath_poll_hdrqfull(pd); | |
7f510b46 | 1376 | |
70c51da2 | 1377 | if (pd->port_urgent != pd->port_urgent_poll) { |
f2d04231 | 1378 | pollflag |= POLLIN | POLLRDNORM; |
70c51da2 | 1379 | pd->port_urgent_poll = pd->port_urgent; |
f2d04231 | 1380 | } |
7f510b46 | 1381 | |
f2d04231 | 1382 | if (!pollflag) { |
70c51da2 | 1383 | /* this saves a spin_lock/unlock in interrupt handler... */ |
f2d04231 | 1384 | set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag); |
70c51da2 AJ |
1385 | /* flush waiting flag so don't miss an event... */ |
1386 | wmb(); | |
f2d04231 RW |
1387 | poll_wait(fp, &pd->port_wait, pt); |
1388 | } | |
1389 | ||
1390 | return pollflag; | |
1391 | } | |
1392 | ||
1393 | static unsigned int ipath_poll_next(struct ipath_portdata *pd, | |
1394 | struct file *fp, | |
1395 | struct poll_table_struct *pt) | |
1396 | { | |
70c51da2 AJ |
1397 | u32 head; |
1398 | u32 tail; | |
f2d04231 RW |
1399 | unsigned pollflag = 0; |
1400 | struct ipath_devdata *dd; | |
1401 | ||
1402 | dd = pd->port_dd; | |
7f510b46 | 1403 | |
70c51da2 AJ |
1404 | /* variable access in ipath_poll_hdrqfull() needs this */ |
1405 | rmb(); | |
1406 | pollflag = ipath_poll_hdrqfull(pd); | |
1407 | ||
7f510b46 | 1408 | head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); |
c59a80ac RC |
1409 | if (pd->port_rcvhdrtail_kvaddr) |
1410 | tail = ipath_get_rcvhdrtail(pd); | |
1411 | else | |
1412 | tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); | |
f2d04231 | 1413 | |
70c51da2 | 1414 | if (head != tail) |
f2d04231 | 1415 | pollflag |= POLLIN | POLLRDNORM; |
70c51da2 AJ |
1416 | else { |
1417 | /* this saves a spin_lock/unlock in interrupt handler */ | |
7f510b46 | 1418 | set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); |
70c51da2 AJ |
1419 | /* flush waiting flag so we don't miss an event */ |
1420 | wmb(); | |
f2d04231 | 1421 | |
d8274869 | 1422 | set_bit(pd->port_port + dd->ipath_r_intravail_shift, |
f2d04231 RW |
1423 | &dd->ipath_rcvctrl); |
1424 | ||
1425 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | |
1426 | dd->ipath_rcvctrl); | |
1427 | ||
9929b0fb | 1428 | if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ |
f2d04231 RW |
1429 | ipath_write_ureg(dd, ur_rcvhdrhead, |
1430 | dd->ipath_rhdrhead_intr_off | head, | |
1431 | pd->port_port); | |
7f510b46 | 1432 | |
f2d04231 | 1433 | poll_wait(fp, &pd->port_wait, pt); |
7f510b46 BS |
1434 | } |
1435 | ||
f2d04231 RW |
1436 | return pollflag; |
1437 | } | |
1438 | ||
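/*
 * Both poll paths above rely on the same lost-wakeup avoidance
 * pattern: check the event, publish the waiting bit with a write
 * barrier, and only then sleep, while the interrupt side records the
 * event before testing the bit. A minimal sketch of the two halves
 * (names are hypothetical, for illustration only):
 *
 *	poller:				interrupt handler:
 *	if (!event_pending()) {		record_event();
 *		set_bit(WAITING, &f);	rmb();
 *		wmb();			if (test_and_clear_bit(WAITING, &f))
 *		poll_wait(fp, &wq, pt);		wake_up(&wq);
 *	}
 */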
1439 | static unsigned int ipath_poll(struct file *fp, | |
1440 | struct poll_table_struct *pt) | |
1441 | { | |
1442 | struct ipath_portdata *pd; | |
1443 | unsigned pollflag; | |
1444 | ||
1445 | pd = port_fp(fp); | |
1446 | if (!pd) | |
1447 | pollflag = 0; | |
1448 | else if (pd->poll_type & IPATH_POLL_TYPE_URGENT) | |
1449 | pollflag = ipath_poll_urgent(pd, fp, pt); | |
1450 | else | |
1451 | pollflag = ipath_poll_next(pd, fp, pt); | |
7f510b46 | 1452 | |
e35d710d | 1453 | return pollflag; |
7f510b46 BS |
1454 | } |
1455 | ||
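/*
 * Hedged userspace sketch (not driver code): once a process has picked
 * a poll type with IPATH_CMD_POLL_TYPE, the device fd behaves like any
 * other pollable descriptor. Error handling omitted:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, timeout_ms) > 0 &&
 *	    (pfd.revents & (POLLIN | POLLRDNORM)))
 *		;	// receive data, urgent packet, or overflow event
 */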
0df6291c MD |
1456 | static int ipath_supports_subports(int user_swmajor, int user_swminor) |
1457 | { | |
1458 | /* no subport implementation prior to software version 1.3 */ | |
1459 | return (user_swmajor > 1) || (user_swminor >= 3); | |
1460 | } | |
1461 | ||
1462 | static int ipath_compatible_subports(int user_swmajor, int user_swminor) | |
1463 | { | |
1464 | /* this code is written long-hand for clarity */ | |
1465 | if (IPATH_USER_SWMAJOR != user_swmajor) { | |
1466 | /* no promise of compatibility if major mismatch */ | |
1467 | return 0; | |
1468 | } | |
1469 | if (IPATH_USER_SWMAJOR == 1) { | |
1470 | switch (IPATH_USER_SWMINOR) { | |
1471 | case 0: | |
1472 | case 1: | |
1473 | case 2: | |
1474 | /* no subport implementation so cannot be compatible */ | |
1475 | return 0; | |
1476 | case 3: | |
1477 | /* 3 is only compatible with itself */ | |
1478 | return user_swminor == 3; | |
1479 | default: | |
1480 | /* >= 4 are compatible (or are expected to be) */ | |
1481 | return user_swminor >= 4; | |
1482 | } | |
1483 | } | |
1484 | /* make no promises yet for future major versions */ | |
1485 | return 0; | |
1486 | } | |
1487 | ||
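/*
 * spu_userversion packs the library's software version into a single
 * 32-bit word: major in the high 16 bits, minor in the low 16, which
 * is why callers below split it with ">> 16" and "& 0xffff". A small
 * sketch of building and splitting the word:
 *
 *	u32 userversion = (IPATH_USER_SWMAJOR << 16) | IPATH_USER_SWMINOR;
 *	int major = userversion >> 16;
 *	int minor = userversion & 0xffff;
 */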
9929b0fb BS |
1488 | static int init_subports(struct ipath_devdata *dd, |
1489 | struct ipath_portdata *pd, | |
1490 | const struct ipath_user_info *uinfo) | |
1491 | { | |
1492 | int ret = 0; | |
c7e29ff1 | 1493 | unsigned num_subports; |
9929b0fb BS |
1494 | size_t size; |
1495 | ||
9929b0fb | 1496 | /* |
bacf4013 | 1497 | * If the user is requesting zero subports, |
9929b0fb BS |
1498 | * skip the subport allocation. |
1499 | */ | |
bacf4013 | 1500 | if (uinfo->spu_subport_cnt <= 0) |
9929b0fb | 1501 | goto bail; |
c7e29ff1 | 1502 | |
0df6291c MD |
1503 | /* Self-consistency check for ipath_compatible_subports() */ |
1504 | if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) && | |
1505 | !ipath_compatible_subports(IPATH_USER_SWMAJOR, | |
1506 | IPATH_USER_SWMINOR)) { | |
c7e29ff1 | 1507 | dev_info(&dd->pcidev->dev, |
0df6291c MD |
1508 | "Inconsistent ipath_compatible_subports()\n"); |
1509 | goto bail; | |
1510 | } | |
1511 | ||
1512 | /* Check for subport compatibility */ | |
1513 | if (!ipath_compatible_subports(uinfo->spu_userversion >> 16, | |
1514 | uinfo->spu_userversion & 0xffff)) { | |
1515 | dev_info(&dd->pcidev->dev, | |
1516 | "Mismatched user version (%d.%d) and driver " | |
1517 | "version (%d.%d) while port sharing. Ensure " | |
c7e29ff1 MD |
1518 | "that driver and library are from the same " |
1519 | "release.\n", | |
0df6291c | 1520 | (int) (uinfo->spu_userversion >> 16), |
c7e29ff1 | 1521 | (int) (uinfo->spu_userversion & 0xffff), |
0df6291c | 1522 | IPATH_USER_SWMAJOR, |
c7e29ff1 MD |
1523 | IPATH_USER_SWMINOR); |
1524 | goto bail; | |
1525 | } | |
0a5a83cf | 1526 | if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) { |
9929b0fb BS |
1527 | ret = -EINVAL; |
1528 | goto bail; | |
1529 | } | |
1530 | ||
c7e29ff1 MD |
1531 | num_subports = uinfo->spu_subport_cnt; |
1532 | pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports); | |
9929b0fb BS |
1533 | if (!pd->subport_uregbase) { |
1534 | ret = -ENOMEM; | |
1535 | goto bail; | |
1536 | } | |
1537 | /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ | |
1538 | size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * | |
c7e29ff1 | 1539 | sizeof(u32), PAGE_SIZE) * num_subports; |
9929b0fb BS |
1540 | pd->subport_rcvhdr_base = vmalloc(size); |
1541 | if (!pd->subport_rcvhdr_base) { | |
1542 | ret = -ENOMEM; | |
1543 | goto bail_ureg; | |
1544 | } | |
1545 | ||
1546 | pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * | |
1547 | pd->port_rcvegrbuf_size * | |
c7e29ff1 | 1548 | num_subports); |
9929b0fb BS |
1549 | if (!pd->subport_rcvegrbuf) { |
1550 | ret = -ENOMEM; | |
1551 | goto bail_rhdr; | |
1552 | } | |
1553 | ||
1554 | pd->port_subport_cnt = uinfo->spu_subport_cnt; | |
1555 | pd->port_subport_id = uinfo->spu_subport_id; | |
1556 | pd->active_slaves = 1; | |
947d7617 | 1557 | set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); |
c7e29ff1 MD |
1558 | memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports); |
1559 | memset(pd->subport_rcvhdr_base, 0, size); | |
1560 | memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks * | |
1561 | pd->port_rcvegrbuf_size * | |
1562 | num_subports); | |
9929b0fb BS |
1563 | goto bail; |
1564 | ||
1565 | bail_rhdr: | |
1566 | vfree(pd->subport_rcvhdr_base); | |
1567 | bail_ureg: | |
1568 | vfree(pd->subport_uregbase); | |
1569 | pd->subport_uregbase = NULL; | |
1570 | bail: | |
1571 | return ret; | |
1572 | } | |
1573 | ||
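/*
 * Worked example of the three allocations above (illustrative numbers,
 * not from any particular chip): with num_subports = 4, rcvhdrcnt = 64
 * and rcvhdrentsize = 16 (in u32 units), each subport's header queue
 * is ALIGN(64 * 16 * 4, PAGE_SIZE) = 4096 bytes, so subport_rcvhdr_base
 * is a single 16 KiB vmalloc; subport_uregbase is one page per subport,
 * and subport_rcvegrbuf is rcvegrbuf_chunks * rcvegrbuf_size bytes per
 * subport.
 */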
7f510b46 | 1574 | static int try_alloc_port(struct ipath_devdata *dd, int port, |
9929b0fb BS |
1575 | struct file *fp, |
1576 | const struct ipath_user_info *uinfo) | |
7f510b46 | 1577 | { |
9929b0fb | 1578 | struct ipath_portdata *pd; |
7f510b46 BS |
1579 | int ret; |
1580 | ||
9929b0fb BS |
1581 | if (!(pd = dd->ipath_pd[port])) { |
1582 | void *ptmp; | |
7f510b46 | 1583 | |
9929b0fb | 1584 | pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); |
7f510b46 BS |
1585 | |
1586 | /* | |
1587 | * Allocate memory for use in ipath_tid_update() just once | |
1588 | * at open, not per call. Reduces cost of expected send | |
1589 | * setup. | |
1590 | */ | |
1591 | ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) + | |
1592 | dd->ipath_rcvtidcnt * sizeof(struct page **), | |
1593 | GFP_KERNEL); | |
9929b0fb | 1594 | if (!pd || !ptmp) { |
7f510b46 BS |
1595 | ipath_dev_err(dd, "Unable to allocate portdata " |
1596 | "memory, failing open\n"); | |
1597 | ret = -ENOMEM; | |
9929b0fb | 1598 | kfree(pd); |
7f510b46 BS |
1599 | kfree(ptmp); |
1600 | goto bail; | |
1601 | } | |
9929b0fb | 1602 | dd->ipath_pd[port] = pd; |
7f510b46 BS |
1603 | dd->ipath_pd[port]->port_port = port; |
1604 | dd->ipath_pd[port]->port_dd = dd; | |
1605 | dd->ipath_pd[port]->port_tid_pg_list = ptmp; | |
1606 | init_waitqueue_head(&dd->ipath_pd[port]->port_wait); | |
1607 | } | |
9929b0fb BS |
1608 | if (!pd->port_cnt) { |
1609 | pd->userversion = uinfo->spu_userversion; | |
1610 | init_user_egr_sizes(pd); | |
1611 | if ((ret = init_subports(dd, pd, uinfo)) != 0) | |
1612 | goto bail; | |
7f510b46 BS |
1613 | ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n", |
1614 | current->comm, current->pid, dd->ipath_unit, | |
1615 | port); | |
9929b0fb BS |
1616 | pd->port_cnt = 1; |
1617 | port_fp(fp) = pd; | |
40d97692 | 1618 | pd->port_pid = get_pid(task_pid(current)); |
9929b0fb | 1619 | strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); |
7f510b46 BS |
1620 | ipath_stats.sps_ports++; |
1621 | ret = 0; | |
9929b0fb BS |
1622 | } else |
1623 | ret = -EBUSY; | |
7f510b46 BS |
1624 | |
1625 | bail: | |
1626 | return ret; | |
1627 | } | |
1628 | ||
1629 | static inline int usable(struct ipath_devdata *dd) | |
1630 | { | |
1631 | return dd && | |
1632 | (dd->ipath_flags & IPATH_PRESENT) && | |
1633 | dd->ipath_kregbase && | |
1634 | dd->ipath_lid && | |
1635 | !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED | |
1636 | | IPATH_LINKUNK)); | |
1637 | } | |
1638 | ||
9929b0fb BS |
1639 | static int find_free_port(int unit, struct file *fp, |
1640 | const struct ipath_user_info *uinfo) | |
7f510b46 BS |
1641 | { |
1642 | struct ipath_devdata *dd = ipath_lookup(unit); | |
1643 | int ret, i; | |
1644 | ||
1645 | if (!dd) { | |
1646 | ret = -ENODEV; | |
1647 | goto bail; | |
1648 | } | |
1649 | ||
1650 | if (!usable(dd)) { | |
1651 | ret = -ENETDOWN; | |
1652 | goto bail; | |
1653 | } | |
1654 | ||
9929b0fb BS |
1655 | for (i = 1; i < dd->ipath_cfgports; i++) { |
1656 | ret = try_alloc_port(dd, i, fp, uinfo); | |
7f510b46 BS |
1657 | if (ret != -EBUSY) |
1658 | goto bail; | |
1659 | } | |
1660 | ret = -EBUSY; | |
1661 | ||
1662 | bail: | |
1663 | return ret; | |
1664 | } | |
1665 | ||
9929b0fb BS |
1666 | static int find_best_unit(struct file *fp, |
1667 | const struct ipath_user_info *uinfo) | |
7f510b46 BS |
1668 | { |
1669 | int ret = 0, i, prefunit = -1, devmax; | |
1670 | int maxofallports, npresent, nup; | |
1671 | int ndev; | |
1672 | ||
9929b0fb | 1673 | devmax = ipath_count_units(&npresent, &nup, &maxofallports); |
7f510b46 BS |
1674 | |
1675 | /* | |
1676 | * This code is present to allow a knowledgeable person to | |
1677 | * specify the layout of processes to processors before opening | |
1678 | * this driver, and then we'll assign the process to the "closest" | |
525d0ca1 | 1679 | * InfiniPath chip to that processor (we assume reasonable connectivity, |
7f510b46 BS |
1680 | * for now). This code assumes that if affinity has been set |
1681 | * before this point, that at most one cpu is set; for now this | |
1682 | * is reasonable. I check for both cpus_empty() and cpus_full(), | |
1683 | * in case some kernel variant sets none of the bits when no | |
1684 | * affinity is set. 2.6.11 and 12 kernels have all present | |
1685 | * cpus set. Some day we'll have to fix it up further to handle | |
525d0ca1 | 1686 | * a cpu subset. This algorithm fails for two HT chips connected |
7f510b46 BS |
1687 | * in tunnel fashion. Eventually this needs real topology |
1688 | * information. There may be some issues with dual core numbering | |
1689 | * as well. This needs more work prior to release. | |
1690 | */ | |
1691 | if (!cpus_empty(current->cpus_allowed) && | |
1692 | !cpus_full(current->cpus_allowed)) { | |
f0810daf | 1693 | int ncpus = num_online_cpus(), curcpu = -1, nset = 0; |
7f510b46 BS |
1694 | for (i = 0; i < ncpus; i++) |
1695 | if (cpu_isset(i, current->cpus_allowed)) { | |
1696 | ipath_cdbg(PROC, "%s[%u] affinity set for " | |
f0810daf BS |
1697 | "cpu %d/%d\n", current->comm, |
1698 | current->pid, i, ncpus); | |
7f510b46 | 1699 | curcpu = i; |
f0810daf | 1700 | nset++; |
7f510b46 | 1701 | } |
f0810daf | 1702 | if (curcpu != -1 && nset != ncpus) { |
7f510b46 BS |
1703 | if (npresent) { |
1704 | prefunit = curcpu / (ncpus / npresent); | |
c7e29ff1 | 1705 | ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, " |
7f510b46 BS |
1706 | "%d cpus/chip, select unit %d\n", |
1707 | current->comm, current->pid, | |
1708 | npresent, ncpus, ncpus / npresent, | |
1709 | prefunit); | |
1710 | } | |
1711 | } | |
1712 | } | |
1713 | ||
1714 | /* | |
1715 | * User ports start at 1; the kernel port is 0. | |
1716 | * For now, we do round-robin access across all chips. | |
1717 | */ | |
1718 | ||
1719 | if (prefunit != -1) | |
1720 | devmax = prefunit + 1; | |
7f510b46 BS |
1721 | recheck: |
1722 | for (i = 1; i < maxofallports; i++) { | |
1723 | for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax; | |
1724 | ndev++) { | |
1725 | struct ipath_devdata *dd = ipath_lookup(ndev); | |
1726 | ||
1727 | if (!usable(dd)) | |
1728 | continue; /* can't use this unit */ | |
1729 | if (i >= dd->ipath_cfgports) | |
1730 | /* | |
1731 | * Maxed out on users of this unit. Try | |
1732 | * next. | |
1733 | */ | |
1734 | continue; | |
9929b0fb | 1735 | ret = try_alloc_port(dd, i, fp, uinfo); |
7f510b46 BS |
1736 | if (!ret) |
1737 | goto done; | |
1738 | } | |
1739 | } | |
1740 | ||
1741 | if (npresent) { | |
1742 | if (nup == 0) { | |
1743 | ret = -ENETDOWN; | |
1744 | ipath_dbg("No ports available (none initialized " | |
1745 | "and ready)\n"); | |
1746 | } else { | |
1747 | if (prefunit > 0) { | |
1748 | /* if started above 0, retry from 0 */ | |
1749 | ipath_cdbg(PROC, | |
1750 | "%s[%u] no ports on prefunit " | |
1751 | "%d, clear and re-check\n", | |
1752 | current->comm, current->pid, | |
1753 | prefunit); | |
1754 | devmax = ipath_count_units(NULL, NULL, | |
1755 | NULL); | |
1756 | prefunit = -1; | |
1757 | goto recheck; | |
1758 | } | |
1759 | ret = -EBUSY; | |
1760 | ipath_dbg("No ports available\n"); | |
1761 | } | |
1762 | } else { | |
1763 | ret = -ENXIO; | |
1764 | ipath_dbg("No boards found\n"); | |
1765 | } | |
1766 | ||
1767 | done: | |
1768 | return ret; | |
1769 | } | |
1770 | ||
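/*
 * Worked example of the affinity heuristic above: with 8 online cpus
 * and 2 chips present, ncpus / npresent = 4, so a process pinned to
 * cpu 5 gets prefunit = 5 / 4 = 1 (the second chip); processes pinned
 * to cpus 0-3 prefer unit 0, cpus 4-7 prefer unit 1.
 */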
9929b0fb BS |
1771 | static int find_shared_port(struct file *fp, |
1772 | const struct ipath_user_info *uinfo) | |
1773 | { | |
1774 | int devmax, ndev, i; | |
1775 | int ret = 0; | |
1776 | ||
1777 | devmax = ipath_count_units(NULL, NULL, NULL); | |
1778 | ||
1779 | for (ndev = 0; ndev < devmax; ndev++) { | |
1780 | struct ipath_devdata *dd = ipath_lookup(ndev); | |
1781 | ||
5d1ce03d | 1782 | if (!usable(dd)) |
9929b0fb BS |
1783 | continue; |
1784 | for (i = 1; i < dd->ipath_cfgports; i++) { | |
1785 | struct ipath_portdata *pd = dd->ipath_pd[i]; | |
1786 | ||
1787 | /* Skip ports which are not yet open */ | |
1788 | if (!pd || !pd->port_cnt) | |
1789 | continue; | |
1790 | /* Skip port if it doesn't match the requested one */ | |
1791 | if (pd->port_subport_id != uinfo->spu_subport_id) | |
1792 | continue; | |
1793 | /* Verify the sharing process matches the master */ | |
1794 | if (pd->port_subport_cnt != uinfo->spu_subport_cnt || | |
1795 | pd->userversion != uinfo->spu_userversion || | |
1796 | pd->port_cnt >= pd->port_subport_cnt) { | |
1797 | ret = -EINVAL; | |
1798 | goto done; | |
1799 | } | |
1800 | port_fp(fp) = pd; | |
1801 | subport_fp(fp) = pd->port_cnt++; | |
40d97692 PE |
1802 | pd->port_subpid[subport_fp(fp)] = |
1803 | get_pid(task_pid(current)); | |
9929b0fb BS |
1804 | tidcursor_fp(fp) = 0; |
1805 | pd->active_slaves |= 1 << subport_fp(fp); | |
1806 | ipath_cdbg(PROC, | |
1807 | "%s[%u] %u sharing %s[%u] unit:port %u:%u\n", | |
1808 | current->comm, current->pid, | |
1809 | subport_fp(fp), | |
40d97692 | 1810 | pd->port_comm, pid_nr(pd->port_pid), |
9929b0fb BS |
1811 | dd->ipath_unit, pd->port_port); |
1812 | ret = 1; | |
1813 | goto done; | |
1814 | } | |
1815 | } | |
1816 | ||
1817 | done: | |
1818 | return ret; | |
1819 | } | |
1820 | ||
7f510b46 BS |
1821 | static int ipath_open(struct inode *in, struct file *fp) |
1822 | { | |
c97d27d8 | 1823 | /* The real work is performed later in ipath_assign_port() */ |
f2b9857e | 1824 | cycle_kernel_lock(); |
9929b0fb BS |
1825 | fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL); |
1826 | return fp->private_data ? 0 : -ENOMEM; | |
1827 | } | |
1828 | ||
c97d27d8 BS |
1829 | /* Get the port early, so we can set affinity prior to memory allocation */ |
1830 | static int ipath_assign_port(struct file *fp, | |
9929b0fb BS |
1831 | const struct ipath_user_info *uinfo) |
1832 | { | |
1833 | int ret; | |
9929b0fb | 1834 | int i_minor; |
0df6291c | 1835 | unsigned swmajor, swminor; |
9929b0fb BS |
1836 | |
1837 | /* Check to be sure we haven't already initialized this file */ | |
1838 | if (port_fp(fp)) { | |
1839 | ret = -EINVAL; | |
1840 | goto done; | |
1841 | } | |
1842 | ||
1843 | /* for now, if major version is different, bail */ | |
0df6291c MD |
1844 | swmajor = uinfo->spu_userversion >> 16; |
1845 | if (swmajor != IPATH_USER_SWMAJOR) { | |
9929b0fb BS |
1846 | ipath_dbg("User major version %d not same as driver " |
1847 | "major %d\n", uinfo->spu_userversion >> 16, | |
1848 | IPATH_USER_SWMAJOR); | |
1849 | ret = -ENODEV; | |
1850 | goto done; | |
1851 | } | |
1852 | ||
1853 | swminor = uinfo->spu_userversion & 0xffff; | |
1854 | if (swminor != IPATH_USER_SWMINOR) | |
1855 | ipath_dbg("User minor version %d not same as driver " | |
1856 | "minor %d\n", swminor, IPATH_USER_SWMINOR); | |
7f510b46 BS |
1857 | |
1858 | mutex_lock(&ipath_mutex); | |
1859 | ||
0df6291c MD |
1860 | if (ipath_compatible_subports(swmajor, swminor) && |
1861 | uinfo->spu_subport_cnt && | |
9929b0fb | 1862 | (ret = find_shared_port(fp, uinfo))) { |
9929b0fb BS |
1863 | if (ret > 0) |
1864 | ret = 0; | |
124b4dcb | 1865 | goto done_chk_sdma; |
9929b0fb BS |
1866 | } |
1867 | ||
1cfd6e64 | 1868 | i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; |
7f510b46 | 1869 | ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", |
1cfd6e64 | 1870 | (long)fp->f_path.dentry->d_inode->i_rdev, i_minor); |
7f510b46 | 1871 | |
9929b0fb BS |
1872 | if (i_minor) |
1873 | ret = find_free_port(i_minor - 1, fp, uinfo); | |
7f510b46 | 1874 | else |
9929b0fb | 1875 | ret = find_best_unit(fp, uinfo); |
7f510b46 | 1876 | |
124b4dcb DO |
1877 | done_chk_sdma: |
1878 | if (!ret) { | |
1879 | struct ipath_filedata *fd = fp->private_data; | |
1880 | const struct ipath_portdata *pd = fd->pd; | |
1881 | const struct ipath_devdata *dd = pd->port_dd; | |
1882 | ||
1883 | fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev, | |
1884 | dd->ipath_unit, | |
1885 | pd->port_port, | |
1886 | fd->subport); | |
1887 | ||
1888 | if (!fd->pq) | |
1889 | ret = -ENOMEM; | |
1890 | } | |
1891 | ||
7f510b46 | 1892 | mutex_unlock(&ipath_mutex); |
9929b0fb | 1893 | |
c97d27d8 BS |
1894 | done: |
1895 | return ret; | |
1896 | } | |
1897 | ||
1898 | ||
1899 | static int ipath_do_user_init(struct file *fp, | |
1900 | const struct ipath_user_info *uinfo) | |
1901 | { | |
1902 | int ret; | |
947d7617 | 1903 | struct ipath_portdata *pd = port_fp(fp); |
c97d27d8 BS |
1904 | struct ipath_devdata *dd; |
1905 | u32 head32; | |
9929b0fb | 1906 | |
947d7617 RC |
1907 | /* Subports don't need to initialize anything since the master did it. */ | |
1908 | if (subport_fp(fp)) { | |
1909 | ret = wait_event_interruptible(pd->port_wait, | |
1910 | !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag)); | |
1911 | goto done; | |
1912 | } | |
1913 | ||
9929b0fb BS |
1914 | dd = pd->port_dd; |
1915 | ||
1916 | if (uinfo->spu_rcvhdrsize) { | |
1917 | ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize); | |
1918 | if (ret) | |
1919 | goto done; | |
1920 | } | |
1921 | ||
1922 | /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ | |
1923 | ||
e2ab41ca DO |
1924 | /* some ports may get extra buffers; calculate that here */ |
1925 | if (pd->port_port <= dd->ipath_ports_extrabuf) | |
1926 | pd->port_piocnt = dd->ipath_pbufsport + 1; | |
1927 | else | |
1928 | pd->port_piocnt = dd->ipath_pbufsport; | |
1929 | ||
9929b0fb | 1930 | /* for right now, kernel piobufs are at end, so port 1 is at 0 */ |
e2ab41ca DO |
1931 | if (pd->port_port <= dd->ipath_ports_extrabuf) |
1932 | pd->port_pio_base = (dd->ipath_pbufsport + 1) | |
1933 | * (pd->port_port - 1); | |
1934 | else | |
1935 | pd->port_pio_base = dd->ipath_ports_extrabuf + | |
1936 | dd->ipath_pbufsport * (pd->port_port - 1); | |
9929b0fb | 1937 | pd->port_piobufs = dd->ipath_piobufbase + |
e2ab41ca DO |
1938 | pd->port_pio_base * dd->ipath_palign; |
1939 | ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u," | |
1940 | " first pio %u\n", pd->port_port, pd->port_piobufs, | |
1941 | pd->port_piocnt, pd->port_pio_base); | |
1942 | ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0); | |
9929b0fb BS |
1943 | |
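	/*
	 * Worked example of the buffer accounting above (illustrative
	 * numbers only): with ipath_pbufsport = 32 and
	 * ipath_ports_extrabuf = 2, port 1 gets buffers 0-32 (33
	 * buffers), port 2 gets 33-65, and port 3, past the extras,
	 * starts at 2 + 32 * 2 = 66 with 32 buffers.
	 */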
1944 | /* | |
1945 | * Now allocate the rcvhdr Q and eager TIDs; skip the TID | |
1946 | * array for the time being. If pd->port_port exceeds what the | |
1947 | * chip supports, we will someday need extra logic here to | |
1948 | * handle the overflow through port 0. | |
1949 | */ | |
1950 | ret = ipath_create_rcvhdrq(dd, pd); | |
1951 | if (!ret) | |
1952 | ret = ipath_create_user_egr(pd); | |
1953 | if (ret) | |
1954 | goto done; | |
1955 | ||
1956 | /* | |
1957 | * set the eager head register for this port to the current values | |
1958 | * of the tail pointers, since we don't know if they were | |
1959 | * updated on last use of the port. | |
1960 | */ | |
1961 | head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); | |
1962 | ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); | |
755807a2 | 1963 | pd->port_lastrcvhdrqtail = -1; |
9929b0fb BS |
1964 | ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", |
1965 | pd->port_port, head32); | |
1966 | pd->port_tidcursor = 0; /* start at beginning after open */ | |
70c51da2 AJ |
1967 | |
1968 | /* initialize poll variables... */ | |
1969 | pd->port_urgent = 0; | |
1970 | pd->port_urgent_poll = 0; | |
1971 | pd->port_hdrqfull_poll = pd->port_hdrqfull; | |
1972 | ||
9929b0fb | 1973 | /* |
9355fb6a RC |
1974 | * Now enable the port for receive. |
1975 | * Chips that are set to DMA the tail register to memory do so | |
1976 | * when it changes and when the update bit transitions from 0 | |
1977 | * to 1, so for those chips we turn tail updates off and then | |
1978 | * back on. This (very briefly) affects any other open ports, | |
1979 | * but the duration is so short that it isn't an issue. We | |
1980 | * explicitly set the in-memory tail copy to 0 beforehand, so | |
1981 | * we don't have to wait for the DMA update to happen (the | |
1982 | * chip resets head/tail to 0 on the transition to enable). | |
9929b0fb | 1983 | */ |
d8274869 | 1984 | set_bit(dd->ipath_r_portenable_shift + pd->port_port, |
9929b0fb | 1985 | &dd->ipath_rcvctrl); |
9355fb6a RC |
1986 | if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) { |
1987 | if (pd->port_rcvhdrtail_kvaddr) | |
1988 | ipath_clear_rcvhdrtail(pd); | |
1989 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | |
d8274869 DO |
1990 | dd->ipath_rcvctrl & |
1991 | ~(1ULL << dd->ipath_r_tailupd_shift)); | |
9355fb6a | 1992 | } |
9929b0fb BS |
1993 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, |
1994 | dd->ipath_rcvctrl); | |
947d7617 RC |
1995 | /* Notify any waiting slaves */ |
1996 | if (pd->port_subport_cnt) { | |
1997 | clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); | |
1998 | wake_up(&pd->port_wait); | |
1999 | } | |
9929b0fb | 2000 | done: |
7f510b46 BS |
2001 | return ret; |
2002 | } | |
2003 | ||
2004 | /** | |
2005 | * unlock_expected_tids - unlock any expected TID entries the port still had in use |
2006 | * @pd: port | |
2007 | * | |
2008 | * We don't actually update the chip here, because we do a bulk update | |
2009 | * below, using ipath_f_clear_tids. | |
2010 | */ | |
2011 | static void unlock_expected_tids(struct ipath_portdata *pd) | |
2012 | { | |
2013 | struct ipath_devdata *dd = pd->port_dd; | |
2014 | int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt; | |
2015 | int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt; | |
2016 | ||
2017 | ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n", | |
2018 | pd->port_port); | |
2019 | for (i = port_tidbase; i < maxtid; i++) { | |
9355fb6a RC |
2020 | struct page *ps = dd->ipath_pageshadow[i]; |
2021 | ||
2022 | if (!ps) | |
7f510b46 BS |
2023 | continue; |
2024 | ||
9355fb6a | 2025 | dd->ipath_pageshadow[i] = NULL; |
1fd3b40f BS |
2026 | pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i], |
2027 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | |
9355fb6a | 2028 | ipath_release_user_pages_on_close(&ps, 1); |
7f510b46 BS |
2029 | cnt++; |
2030 | ipath_stats.sps_pageunlocks++; | |
2031 | } | |
2032 | if (cnt) | |
2033 | ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n", | |
2034 | pd->port_port, cnt); | |
2035 | ||
2036 | if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks) | |
2037 | ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n", | |
2038 | (unsigned long long) ipath_stats.sps_pagelocks, | |
2039 | (unsigned long long) | |
2040 | ipath_stats.sps_pageunlocks); | |
2041 | } | |
2042 | ||
2043 | static int ipath_close(struct inode *in, struct file *fp) | |
2044 | { | |
2045 | int ret = 0; | |
9929b0fb | 2046 | struct ipath_filedata *fd; |
7f510b46 BS |
2047 | struct ipath_portdata *pd; |
2048 | struct ipath_devdata *dd; | |
2049 | unsigned port; | |
2050 | ||
2051 | ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", | |
2052 | (long)in->i_rdev, fp->private_data); | |
2053 | ||
2054 | mutex_lock(&ipath_mutex); | |
2055 | ||
9929b0fb | 2056 | fd = (struct ipath_filedata *) fp->private_data; |
7f510b46 | 2057 | fp->private_data = NULL; |
9929b0fb BS |
2058 | pd = fd->pd; |
2059 | if (!pd) { | |
2060 | mutex_unlock(&ipath_mutex); | |
2061 | goto bail; | |
2062 | } | |
124b4dcb DO |
2063 | |
2064 | dd = pd->port_dd; | |
2065 | ||
2066 | /* drain user sdma queue */ | |
2067 | ipath_user_sdma_queue_drain(dd, fd->pq); | |
2068 | ipath_user_sdma_queue_destroy(fd->pq); | |
2069 | ||
9929b0fb BS |
2070 | if (--pd->port_cnt) { |
2071 | /* | |
2072 | * XXX If the master closes the port before the slave(s), | |
2073 | * revoke the mmap for the eager receive queue so | |
2074 | * the slave(s) don't wait for receive data forever. | |
2075 | */ | |
2076 | pd->active_slaves &= ~(1 << fd->subport); | |
40d97692 PE |
2077 | put_pid(pd->port_subpid[fd->subport]); |
2078 | pd->port_subpid[fd->subport] = NULL; | |
9929b0fb BS |
2079 | mutex_unlock(&ipath_mutex); |
2080 | goto bail; | |
2081 | } | |
2082 | port = pd->port_port; | |
7f510b46 BS |
2083 | |
2084 | if (pd->port_hdrqfull) { | |
2085 | ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " | |
40d97692 | 2086 | "during run\n", pd->port_comm, pid_nr(pd->port_pid), |
7f510b46 BS |
2087 | pd->port_hdrqfull); |
2088 | pd->port_hdrqfull = 0; | |
2089 | } | |
2090 | ||
2091 | if (pd->port_rcvwait_to || pd->port_piowait_to | |
2092 | || pd->port_rcvnowait || pd->port_pionowait) { | |
2093 | ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; " | |
2094 | "%u rcv %u, pio already\n", | |
2095 | pd->port_port, pd->port_rcvwait_to, | |
2096 | pd->port_piowait_to, pd->port_rcvnowait, | |
2097 | pd->port_pionowait); | |
2098 | pd->port_rcvwait_to = pd->port_piowait_to = | |
2099 | pd->port_rcvnowait = pd->port_pionowait = 0; | |
2100 | } | |
2101 | if (pd->port_flag) { | |
bb917144 | 2102 | ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n", |
7f510b46 BS |
2103 | pd->port_port, pd->port_flag); |
2104 | pd->port_flag = 0; | |
2105 | } | |
2106 | ||
2107 | if (dd->ipath_kregbase) { | |
70c51da2 | 2108 | /* atomically clear the receive enable and intr avail bits for this port */ |
d8274869 | 2109 | clear_bit(dd->ipath_r_portenable_shift + port, |
35783ec0 | 2110 | &dd->ipath_rcvctrl); |
d8274869 | 2111 | clear_bit(pd->port_port + dd->ipath_r_intravail_shift, |
70c51da2 | 2112 | &dd->ipath_rcvctrl); |
35783ec0 BS |
2113 | ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl, |
2114 | dd->ipath_rcvctrl); | |
2115 | /* and read back from chip to be sure that nothing | |
2116 | * else is in flight when we do the rest */ | |
2117 | (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); | |
7f510b46 BS |
2118 | |
2119 | /* clean up the pkeys for this port user */ | |
2120 | ipath_clean_part_key(pd, dd); | |
35783ec0 BS |
2121 | /* |
2122 | * be paranoid, and never write 0's to these, just use an | |
2123 | * unused part of the port 0 tail page. Of course, | |
2124 | * rcvhdraddr points to a large chunk of memory, so this | |
2125 | * could still trash things, but at least it won't trash | |
2126 | * page 0, and by disabling the port, it should stop "soon", | |
2127 | * even if a packet or two is in already in flight after we | |
2128 | * disabled the port. | |
2129 | */ | |
2130 | ipath_write_kreg_port(dd, | |
2131 | dd->ipath_kregs->kr_rcvhdrtailaddr, port, | |
2132 | dd->ipath_dummy_hdrq_phys); | |
2133 | ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, | |
2134 | pd->port_port, dd->ipath_dummy_hdrq_phys); | |
2135 | ||
e2ab41ca DO |
2136 | ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt); |
2137 | ipath_chg_pioavailkernel(dd, pd->port_pio_base, | |
2138 | pd->port_piocnt, 1); | |
35783ec0 | 2139 | |
1fd3b40f BS |
2140 | dd->ipath_f_clear_tids(dd, pd->port_port); |
2141 | ||
35783ec0 BS |
2142 | if (dd->ipath_pageshadow) |
2143 | unlock_expected_tids(pd); | |
2144 | ipath_stats.sps_ports--; | |
2145 | ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", | |
40d97692 | 2146 | pd->port_comm, pid_nr(pd->port_pid), |
35783ec0 | 2147 | dd->ipath_unit, port); |
7f510b46 BS |
2148 | } |
2149 | ||
40d97692 PE |
2150 | put_pid(pd->port_pid); |
2151 | pd->port_pid = NULL; | |
f37bda92 | 2152 | dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ |
7f510b46 | 2153 | mutex_unlock(&ipath_mutex); |
f37bda92 | 2154 | ipath_free_pddata(dd, pd); /* after releasing the mutex */ |
7f510b46 | 2155 | |
9929b0fb BS |
2156 | bail: |
2157 | kfree(fd); | |
7f510b46 BS |
2158 | return ret; |
2159 | } | |
2160 | ||
9929b0fb | 2161 | static int ipath_port_info(struct ipath_portdata *pd, u16 subport, |
7f510b46 BS |
2162 | struct ipath_port_info __user *uinfo) |
2163 | { | |
2164 | struct ipath_port_info info; | |
2165 | int nup; | |
2166 | int ret; | |
9929b0fb | 2167 | size_t sz; |
7f510b46 BS |
2168 | |
2169 | (void) ipath_count_units(NULL, &nup, NULL); | |
2170 | info.num_active = nup; | |
2171 | info.unit = pd->port_dd->ipath_unit; | |
2172 | info.port = pd->port_port; | |
9929b0fb BS |
2173 | info.subport = subport; |
2174 | /* Don't return new fields if an old library opened the port. */ | |
0df6291c MD |
2175 | if (ipath_supports_subports(pd->userversion >> 16, |
2176 | pd->userversion & 0xffff)) { | |
9929b0fb BS |
2177 | /* Number of user ports available for this device. */ |
2178 | info.num_ports = pd->port_dd->ipath_cfgports - 1; | |
2179 | info.num_subports = pd->port_subport_cnt; | |
2180 | sz = sizeof(info); | |
2181 | } else | |
2182 | sz = sizeof(info) - 2 * sizeof(u16); | |
7f510b46 | 2183 | |
9929b0fb | 2184 | if (copy_to_user(uinfo, &info, sz)) { |
7f510b46 BS |
2185 | ret = -EFAULT; |
2186 | goto bail; | |
2187 | } | |
2188 | ret = 0; | |
2189 | ||
2190 | bail: | |
2191 | return ret; | |
2192 | } | |
2193 | ||
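/*
 * The size truncation above is the backward-compatibility contract:
 * the newer fields are the two u16s at the tail of the structure, so
 * an old library receives exactly the layout it was compiled against.
 * Sketch of the assumed layout (see ipath_common.h for the real one):
 *
 *	struct ipath_port_info {
 *		__u32 num_active;	// number of active units
 *		__u32 unit;		// unit (chip) assigned to caller
 *		__u16 port;		// port on unit assigned to caller
 *		__u16 subport;		// subport on that port
 *		__u16 num_ports;	// user ports available on unit
 *		__u16 num_subports;	// subports opened on that port
 *	};
 */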
9929b0fb BS |
2194 | static int ipath_get_slave_info(struct ipath_portdata *pd, |
2195 | void __user *slave_mask_addr) | |
2196 | { | |
2197 | int ret = 0; | |
2198 | ||
2199 | if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32))) | |
2200 | ret = -EFAULT; | |
2201 | return ret; | |
2202 | } | |
2203 | ||
124b4dcb DO |
2204 | static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq, |
2205 | u32 __user *inflightp) | |
2206 | { | |
2207 | const u32 val = ipath_user_sdma_inflight_counter(pq); | |
2208 | ||
2209 | if (put_user(val, inflightp)) | |
2210 | return -EFAULT; | |
2211 | ||
2212 | return 0; | |
2213 | } | |
2214 | ||
2215 | static int ipath_sdma_get_complete(struct ipath_devdata *dd, | |
2216 | struct ipath_user_sdma_queue *pq, | |
2217 | u32 __user *completep) | |
2218 | { | |
2219 | u32 val; | |
2220 | int err; | |
2221 | ||
2222 | err = ipath_user_sdma_make_progress(dd, pq); | |
2223 | if (err < 0) | |
2224 | return err; | |
2225 | ||
2226 | val = ipath_user_sdma_complete_counter(pq); | |
2227 | if (put_user(val, completep)) | |
2228 | return -EFAULT; | |
2229 | ||
2230 | return 0; | |
2231 | } | |
2232 | ||
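/*
 * The two counters form a simple SDMA progress protocol: inflight
 * advances as requests are queued, complete as the hardware finishes
 * them. A hedged userspace sketch (get_counter() is a hypothetical
 * wrapper around the two IPATH_CMD_SDMA_* commands) that waits for
 * queued writes to drain:
 *
 *	u32 sent = get_counter(fd, IPATH_CMD_SDMA_INFLIGHT);
 *	u32 done;
 *	do
 *		done = get_counter(fd, IPATH_CMD_SDMA_COMPLETE);
 *	while ((int)(done - sent) < 0);	// counters wrap; compare signed
 */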
7f510b46 BS |
2233 | static ssize_t ipath_write(struct file *fp, const char __user *data, |
2234 | size_t count, loff_t *off) | |
2235 | { | |
2236 | const struct ipath_cmd __user *ucmd; | |
2237 | struct ipath_portdata *pd; | |
2238 | const void __user *src; | |
2239 | size_t consumed, copy; | |
2240 | struct ipath_cmd cmd; | |
2241 | ssize_t ret = 0; | |
2242 | void *dest; | |
2243 | ||
2244 | if (count < sizeof(cmd.type)) { | |
2245 | ret = -EINVAL; | |
2246 | goto bail; | |
2247 | } | |
2248 | ||
2249 | ucmd = (const struct ipath_cmd __user *) data; | |
2250 | ||
2251 | if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { | |
2252 | ret = -EFAULT; | |
2253 | goto bail; | |
2254 | } | |
2255 | ||
2256 | consumed = sizeof(cmd.type); | |
2257 | ||
2258 | switch (cmd.type) { | |
c97d27d8 BS |
2259 | case IPATH_CMD_ASSIGN_PORT: |
2260 | case __IPATH_CMD_USER_INIT: | |
7f510b46 BS |
2261 | case IPATH_CMD_USER_INIT: |
2262 | copy = sizeof(cmd.cmd.user_info); | |
2263 | dest = &cmd.cmd.user_info; | |
2264 | src = &ucmd->cmd.user_info; | |
2265 | break; | |
2266 | case IPATH_CMD_RECV_CTRL: | |
2267 | copy = sizeof(cmd.cmd.recv_ctrl); | |
2268 | dest = &cmd.cmd.recv_ctrl; | |
2269 | src = &ucmd->cmd.recv_ctrl; | |
2270 | break; | |
2271 | case IPATH_CMD_PORT_INFO: | |
2272 | copy = sizeof(cmd.cmd.port_info); | |
2273 | dest = &cmd.cmd.port_info; | |
2274 | src = &ucmd->cmd.port_info; | |
2275 | break; | |
2276 | case IPATH_CMD_TID_UPDATE: | |
2277 | case IPATH_CMD_TID_FREE: | |
2278 | copy = sizeof(cmd.cmd.tid_info); | |
2279 | dest = &cmd.cmd.tid_info; | |
2280 | src = &ucmd->cmd.tid_info; | |
2281 | break; | |
2282 | case IPATH_CMD_SET_PART_KEY: | |
2283 | copy = sizeof(cmd.cmd.part_key); | |
2284 | dest = &cmd.cmd.part_key; | |
2285 | src = &ucmd->cmd.part_key; | |
2286 | break; | |
c7e29ff1 | 2287 | case __IPATH_CMD_SLAVE_INFO: |
9929b0fb BS |
2288 | copy = sizeof(cmd.cmd.slave_mask_addr); |
2289 | dest = &cmd.cmd.slave_mask_addr; | |
2290 | src = &ucmd->cmd.slave_mask_addr; | |
2291 | break; | |
569b87b4 AJ |
2292 | case IPATH_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */ | |
2293 | copy = 0; | |
2294 | src = NULL; | |
2295 | dest = NULL; | |
2296 | break; | |
f2d04231 RW |
2297 | case IPATH_CMD_POLL_TYPE: |
2298 | copy = sizeof(cmd.cmd.poll_type); | |
2299 | dest = &cmd.cmd.poll_type; | |
2300 | src = &ucmd->cmd.poll_type; | |
2301 | break; | |
6ac50727 DO |
2302 | case IPATH_CMD_ARMLAUNCH_CTRL: |
2303 | copy = sizeof(cmd.cmd.armlaunch_ctrl); | |
2304 | dest = &cmd.cmd.armlaunch_ctrl; | |
2305 | src = &ucmd->cmd.armlaunch_ctrl; | |
2306 | break; | |
124b4dcb DO |
2307 | case IPATH_CMD_SDMA_INFLIGHT: |
2308 | copy = sizeof(cmd.cmd.sdma_inflight); | |
2309 | dest = &cmd.cmd.sdma_inflight; | |
2310 | src = &ucmd->cmd.sdma_inflight; | |
2311 | break; | |
2312 | case IPATH_CMD_SDMA_COMPLETE: | |
2313 | copy = sizeof(cmd.cmd.sdma_complete); | |
2314 | dest = &cmd.cmd.sdma_complete; | |
2315 | src = &ucmd->cmd.sdma_complete; | |
2316 | break; | |
7f510b46 BS |
2317 | default: |
2318 | ret = -EINVAL; | |
2319 | goto bail; | |
2320 | } | |
2321 | ||
569b87b4 AJ |
2322 | if (copy) { |
2323 | if ((count - consumed) < copy) { | |
2324 | ret = -EINVAL; | |
2325 | goto bail; | |
2326 | } | |
7f510b46 | 2327 | |
569b87b4 AJ |
2328 | if (copy_from_user(dest, src, copy)) { |
2329 | ret = -EFAULT; | |
2330 | goto bail; | |
2331 | } | |
2332 | ||
2333 | consumed += copy; | |
7f510b46 BS |
2334 | } |
2335 | ||
7f510b46 | 2336 | pd = port_fp(fp); |
c97d27d8 BS |
2337 | if (!pd && cmd.type != __IPATH_CMD_USER_INIT && |
2338 | cmd.type != IPATH_CMD_ASSIGN_PORT) { | |
9929b0fb BS |
2339 | ret = -EINVAL; |
2340 | goto bail; | |
2341 | } | |
7f510b46 BS |
2342 | |
2343 | switch (cmd.type) { | |
c97d27d8 BS |
2344 | case IPATH_CMD_ASSIGN_PORT: |
2345 | ret = ipath_assign_port(fp, &cmd.cmd.user_info); | |
2346 | if (ret) | |
2347 | goto bail; | |
2348 | break; | |
2349 | case __IPATH_CMD_USER_INIT: | |
2350 | /* backwards compatibility, get port first */ | |
2351 | ret = ipath_assign_port(fp, &cmd.cmd.user_info); | |
2352 | if (ret) | |
2353 | goto bail; | |
2354 | /* and fall through to current version. */ | |
7f510b46 | 2355 | case IPATH_CMD_USER_INIT: |
9929b0fb BS |
2356 | ret = ipath_do_user_init(fp, &cmd.cmd.user_info); |
2357 | if (ret) | |
7f510b46 BS |
2358 | goto bail; |
2359 | ret = ipath_get_base_info( | |
9929b0fb | 2360 | fp, (void __user *) (unsigned long) |
7f510b46 BS |
2361 | cmd.cmd.user_info.spu_base_info, |
2362 | cmd.cmd.user_info.spu_base_info_size); | |
2363 | break; | |
2364 | case IPATH_CMD_RECV_CTRL: | |
9929b0fb | 2365 | ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl); |
7f510b46 BS |
2366 | break; |
2367 | case IPATH_CMD_PORT_INFO: | |
9929b0fb | 2368 | ret = ipath_port_info(pd, subport_fp(fp), |
7f510b46 BS |
2369 | (struct ipath_port_info __user *) |
2370 | (unsigned long) cmd.cmd.port_info); | |
2371 | break; | |
2372 | case IPATH_CMD_TID_UPDATE: | |
9929b0fb | 2373 | ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info); |
7f510b46 BS |
2374 | break; |
2375 | case IPATH_CMD_TID_FREE: | |
9929b0fb | 2376 | ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info); |
7f510b46 BS |
2377 | break; |
2378 | case IPATH_CMD_SET_PART_KEY: | |
2379 | ret = ipath_set_part_key(pd, cmd.cmd.part_key); | |
2380 | break; | |
c7e29ff1 | 2381 | case __IPATH_CMD_SLAVE_INFO: |
9929b0fb BS |
2382 | ret = ipath_get_slave_info(pd, |
2383 | (void __user *) (unsigned long) | |
2384 | cmd.cmd.slave_mask_addr); | |
2385 | break; | |
569b87b4 | 2386 | case IPATH_CMD_PIOAVAILUPD: |
c4b4d16e | 2387 | ipath_force_pio_avail_update(pd->port_dd); |
569b87b4 | 2388 | break; |
f2d04231 RW |
2389 | case IPATH_CMD_POLL_TYPE: |
2390 | pd->poll_type = cmd.cmd.poll_type; | |
2391 | break; | |
6ac50727 DO |
2392 | case IPATH_CMD_ARMLAUNCH_CTRL: |
2393 | if (cmd.cmd.armlaunch_ctrl) | |
2394 | ipath_enable_armlaunch(pd->port_dd); | |
2395 | else | |
2396 | ipath_disable_armlaunch(pd->port_dd); | |
2397 | break; | |
124b4dcb DO |
2398 | case IPATH_CMD_SDMA_INFLIGHT: |
2399 | ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp), | |
2400 | (u32 __user *) (unsigned long) | |
2401 | cmd.cmd.sdma_inflight); | |
2402 | break; | |
2403 | case IPATH_CMD_SDMA_COMPLETE: | |
2404 | ret = ipath_sdma_get_complete(pd->port_dd, | |
2405 | user_sdma_queue_fp(fp), | |
2406 | (u32 __user *) (unsigned long) | |
2407 | cmd.cmd.sdma_complete); | |
2408 | break; | |
7f510b46 BS |
2409 | } |
2410 | ||
2411 | if (ret >= 0) | |
2412 | ret = consumed; | |
2413 | ||
2414 | bail: | |
2415 | return ret; | |
2416 | } | |
2417 | ||
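/*
 * Hedged userspace sketch (not driver code): every command arrives as
 * a struct ipath_cmd written to the device fd, so the minimal open
 * sequence implied by the dispatch above would look roughly like this
 * (error handling omitted):
 *
 *	struct ipath_cmd c = { .type = IPATH_CMD_ASSIGN_PORT };
 *	c.cmd.user_info.spu_userversion =
 *		(IPATH_USER_SWMAJOR << 16) | IPATH_USER_SWMINOR;
 *	write(fd, &c, sizeof(c));	// assign a unit:port
 *	c.type = IPATH_CMD_USER_INIT;
 *	write(fd, &c, sizeof(c));	// then initialize it
 *
 * On success ipath_write() returns the number of bytes it consumed,
 * preserving normal write() semantics.
 */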
124b4dcb DO |
2418 | static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov, |
2419 | unsigned long dim, loff_t off) | |
2420 | { | |
2421 | struct file *filp = iocb->ki_filp; | |
2422 | struct ipath_filedata *fp = filp->private_data; | |
2423 | struct ipath_portdata *pd = port_fp(filp); | |
2424 | struct ipath_user_sdma_queue *pq = fp->pq; | |
2425 | ||
2426 | if (!dim) | |
2427 | return -EINVAL; | |
2428 | ||
2429 | return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim); | |
2430 | } | |
2431 | ||
7f510b46 BS |
2432 | static struct class *ipath_class; |
2433 | ||
2b8693c0 | 2434 | static int init_cdev(int minor, char *name, const struct file_operations *fops, |
f4e91eb4 | 2435 | struct cdev **cdevp, struct device **devp) |
7f510b46 BS |
2436 | { |
2437 | const dev_t dev = MKDEV(IPATH_MAJOR, minor); | |
2438 | struct cdev *cdev = NULL; | |
f4e91eb4 | 2439 | struct device *device = NULL; |
7f510b46 BS |
2440 | int ret; |
2441 | ||
2442 | cdev = cdev_alloc(); | |
2443 | if (!cdev) { | |
2444 | printk(KERN_ERR IPATH_DRV_NAME | |
2445 | ": Could not allocate cdev for minor %d, %s\n", | |
2446 | minor, name); | |
2447 | ret = -ENOMEM; | |
2448 | goto done; | |
2449 | } | |
2450 | ||
2451 | cdev->owner = THIS_MODULE; | |
2452 | cdev->ops = fops; | |
2453 | kobject_set_name(&cdev->kobj, name); | |
2454 | ||
2455 | ret = cdev_add(cdev, dev, 1); | |
2456 | if (ret < 0) { | |
2457 | printk(KERN_ERR IPATH_DRV_NAME | |
2458 | ": Could not add cdev for minor %d, %s (err %d)\n", | |
2459 | minor, name, -ret); | |
2460 | goto err_cdev; | |
2461 | } | |
2462 | ||
91bd418f | 2463 | device = device_create(ipath_class, NULL, dev, NULL, name); |
7f510b46 | 2464 | |
f4e91eb4 TJ |
2465 | if (IS_ERR(device)) { |
2466 | ret = PTR_ERR(device); | |
7f510b46 | 2467 | printk(KERN_ERR IPATH_DRV_NAME ": Could not create " |
f4e91eb4 | 2468 | "device for minor %d, %s (err %d)\n", |
7f510b46 BS |
2469 | minor, name, -ret); |
2470 | goto err_cdev; | |
2471 | } | |
2472 | ||
2473 | goto done; | |
2474 | ||
2475 | err_cdev: | |
2476 | cdev_del(cdev); | |
2477 | cdev = NULL; | |
2478 | ||
2479 | done: | |
2480 | if (ret >= 0) { | |
2481 | *cdevp = cdev; | |
f4e91eb4 | 2482 | *devp = device; |
7f510b46 BS |
2483 | } else { |
2484 | *cdevp = NULL; | |
f4e91eb4 | 2485 | *devp = NULL; |
7f510b46 BS |
2486 | } |
2487 | ||
2488 | return ret; | |
2489 | } | |
2490 | ||
2b8693c0 | 2491 | int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, |
f4e91eb4 | 2492 | struct cdev **cdevp, struct device **devp) |
7f510b46 | 2493 | { |
f4e91eb4 | 2494 | return init_cdev(minor, name, fops, cdevp, devp); |
7f510b46 BS |
2495 | } |
2496 | ||
2497 | static void cleanup_cdev(struct cdev **cdevp, | |
f4e91eb4 | 2498 | struct device **devp) |
7f510b46 | 2499 | { |
f4e91eb4 | 2500 | struct device *dev = *devp; |
7f510b46 | 2501 | |
f4e91eb4 TJ |
2502 | if (dev) { |
2503 | device_unregister(dev); | |
2504 | *devp = NULL; | |
7f510b46 BS |
2505 | } |
2506 | ||
2507 | if (*cdevp) { | |
2508 | cdev_del(*cdevp); | |
2509 | *cdevp = NULL; | |
2510 | } | |
2511 | } | |
2512 | ||
2513 | void ipath_cdev_cleanup(struct cdev **cdevp, | |
f4e91eb4 | 2514 | struct device **devp) |
7f510b46 | 2515 | { |
f4e91eb4 | 2516 | cleanup_cdev(cdevp, devp); |
7f510b46 BS |
2517 | } |
2518 | ||
2519 | static struct cdev *wildcard_cdev; | |
f4e91eb4 | 2520 | static struct device *wildcard_dev; |
7f510b46 BS |
2521 | |
2522 | static const dev_t dev = MKDEV(IPATH_MAJOR, 0); | |
2523 | ||
2524 | static int user_init(void) | |
2525 | { | |
2526 | int ret; | |
2527 | ||
2528 | ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME); | |
2529 | if (ret < 0) { | |
2530 | printk(KERN_ERR IPATH_DRV_NAME ": Could not register " | |
2531 | "chrdev region (err %d)\n", -ret); | |
2532 | goto done; | |
2533 | } | |
2534 | ||
2535 | ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME); | |
2536 | ||
2537 | if (IS_ERR(ipath_class)) { | |
2538 | ret = PTR_ERR(ipath_class); | |
2539 | printk(KERN_ERR IPATH_DRV_NAME ": Could not create " | |
2540 | "device class (err %d)\n", -ret); | |
2541 | goto bail; | |
2542 | } | |
2543 | ||
2544 | goto done; | |
2545 | bail: | |
2546 | unregister_chrdev_region(dev, IPATH_NMINORS); | |
2547 | done: | |
2548 | return ret; | |
2549 | } | |
2550 | ||
2551 | static void user_cleanup(void) | |
2552 | { | |
2553 | if (ipath_class) { | |
2554 | class_destroy(ipath_class); | |
2555 | ipath_class = NULL; | |
2556 | } | |
2557 | ||
2558 | unregister_chrdev_region(dev, IPATH_NMINORS); | |
2559 | } | |
2560 | ||
2561 | static atomic_t user_count = ATOMIC_INIT(0); | |
2562 | static atomic_t user_setup = ATOMIC_INIT(0); | |
2563 | ||
2564 | int ipath_user_add(struct ipath_devdata *dd) | |
2565 | { | |
2566 | char name[10]; | |
2567 | int ret; | |
2568 | ||
2569 | if (atomic_inc_return(&user_count) == 1) { | |
2570 | ret = user_init(); | |
2571 | if (ret < 0) { | |
2572 | ipath_dev_err(dd, "Unable to set up user support: " | |
2573 | "error %d\n", -ret); | |
2574 | goto bail; | |
2575 | } | |
7f510b46 | 2576 | ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev, |
f4e91eb4 | 2577 | &wildcard_dev); |
7f510b46 BS |
2578 | if (ret < 0) { |
2579 | ipath_dev_err(dd, "Could not create wildcard " | |
2580 | "minor: error %d\n", -ret); | |
0fd41363 | 2581 | goto bail_user; |
7f510b46 BS |
2582 | } |
2583 | ||
2584 | atomic_set(&user_setup, 1); | |
2585 | } | |
2586 | ||
2587 | snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit); | |
2588 | ||
2589 | ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops, | |
f4e91eb4 | 2590 | &dd->user_cdev, &dd->user_dev); |
7f510b46 BS |
2591 | if (ret < 0) |
2592 | ipath_dev_err(dd, "Could not create user minor %d, %s\n", | |
2593 | dd->ipath_unit + 1, name); | |
2594 | ||
2595 | goto bail; | |
2596 | ||
0fd41363 | 2597 | bail_user: |
7f510b46 BS |
2598 | user_cleanup(); |
2599 | bail: | |
2600 | return ret; | |
2601 | } | |
2602 | ||
a2acb2ff | 2603 | void ipath_user_remove(struct ipath_devdata *dd) |
7f510b46 | 2604 | { |
f4e91eb4 | 2605 | cleanup_cdev(&dd->user_cdev, &dd->user_dev); |
7f510b46 BS |
2606 | |
2607 | if (atomic_dec_return(&user_count) == 0) { | |
2608 | if (atomic_read(&user_setup) == 0) | |
2609 | goto bail; | |
2610 | ||
f4e91eb4 | 2611 | cleanup_cdev(&wildcard_cdev, &wildcard_dev); |
7f510b46 BS |
2612 | user_cleanup(); |
2613 | ||
2614 | atomic_set(&user_setup, 0); | |
2615 | } | |
2616 | bail: | |
2617 | return; | |
2618 | } |