/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/jiffies.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ipath_common.h"
#include "ipath_user_sdma.h"
static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);
/*
 * This is really, really weird shit - write() and writev() here
 * have completely unrelated semantics.  Sucky userland ABI,
 * indeed.
 */
static const struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.write_iter = ipath_write_iter,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap,
	.llseek = noop_llseek,
};
/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
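
/*
 * Illustrative round trip (a sketch, not part of the original driver):
 * the value cvt_kvaddr() returns is handed to user code as an mmap
 * offset, and mmap_kvaddr() below recognizes the mapping by recomputing
 * the same conversion, e.g.:
 *
 *	u64 off = cvt_kvaddr(pd->subport_uregbase);
 *	// userspace: mmap(NULL, len, PROT_READ, MAP_SHARED, fd, off);
 *	// kernel:    pgaddr = vma->vm_pgoff << PAGE_SHIFT;
 *	//            pgaddr == cvt_kvaddr(pd->subport_uregbase) => matched
 */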
static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)
{
	struct ipath_portdata *pd = port_fp(fp);
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;
	unsigned subport_cnt;
	int shared, master;
	size_t sz;

	subport_cnt = pd->port_subport_cnt;
	if (!subport_cnt) {
		shared = 0;
		master = 0;
		subport_cnt = 1;
	} else {
		shared = 1;
		master = !subport_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If port sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ipath_cdbg(PROC,
			   "Base size %zu, need %zu (version mismatch?)\n",
			   ubase_size, sz);
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;
	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;
	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an opaque.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	if (!shared) {
		kinfo->spi_piocnt = pd->port_piocnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
	} else if (master) {
		kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
				    (pd->port_piocnt % subport_cnt);
		/* Master's PIO buffers are after all the slave's */
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign *
			(pd->port_piocnt - kinfo->spi_piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign * kinfo->spi_piocnt * slave;
	}
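
	/*
	 * Worked example (illustrative, not from the original source):
	 * with pd->port_piocnt = 64 and subport_cnt = 3, the master gets
	 * 64 / 3 + 64 % 3 = 22 buffers at the top of the port's range,
	 * and each of the two slaves gets 64 / 3 = 21 buffers at offset
	 * ipath_palign * 21 * slave from port_piobufs (22 + 21 + 21 = 64).
	 */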
	if (shared) {
		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;

		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
			PAGE_SIZE * subport_fp(fp));

		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport_fp(fp));
		kinfo->spi_rcvhdr_tailaddr = 0;
		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
			pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
			subport_fp(fp));

		kinfo->spi_subport_uregbase =
			cvt_kvaddr(pd->subport_uregbase);
		kinfo->spi_subport_rcvegrbuf =
			cvt_kvaddr(pd->subport_rcvegrbuf);
		kinfo->spi_subport_rcvhdr_base =
			cvt_kvaddr(pd->subport_rcvhdr_base);
		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
			   kinfo->spi_port, kinfo->spi_runtime_flags,
			   (unsigned long long) kinfo->spi_subport_uregbase,
			   (unsigned long long) kinfo->spi_subport_rcvegrbuf,
			   (unsigned long long) kinfo->spi_subport_rcvhdr_base);
	}
	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.
	 */
	kinfo->spi_pioindex = (kinfo->spi_piobufbase -
		(dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	/*
	 * user mode PIO buffers are always 2KB, even when 4KB can
	 * be received, and sent via the kernel; this is ibmaxlen
	 * for 2K MTU.
	 */
	kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_subport = subport_fp(fp);
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (master)
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}
/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @fp: the ipath device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subport = subport_fp(fp);

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt) {
		tidcnt = dd->ipath_rcvtidcnt;
		tid = pd->port_tidcursor;
		tidoff = 0;
	} else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		tidoff = dd->ipath_rcvtidcnt - tidcnt;
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		tidoff = tidcnt * (subport - 1);
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);
	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			goto cleanup;
		}
		tidlist[i] = tid + tidoff;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid + tidoff, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
			dd->pcidev, pagep[i], 0, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->ipath_physshadow[porttid + tid];
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
				    physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}
	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid],
						    RCVHQ_RCV_TYPE_EXPECTED,
						    dd->ipath_tidinvalid);
				pci_unmap_page(dd->pcidev,
					dd->ipath_physshadow[porttid + tid],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail" If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!pd->port_subport_cnt)
			pd->port_tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @subport: the subport
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof(tidmap))) {
		ret = -EFAULT;
		goto done;
	}
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt)
		tidcnt = dd->ipath_rcvtidcnt;
	else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		porttid += dd->ipath_rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		porttid += tidcnt * (subport - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));
	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			struct page *p;

			p = dd->ipath_pageshadow[porttid + tid];
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pid_nr(pd->port_pid), tid);
			dd->ipath_f_put_tid(dd, &tidbase[tid],
					    RCVHQ_RCV_TYPE_EXPECTED,
					    dd->ipath_tidinvalid);
			pci_unmap_page(dd->pcidev,
				       dd->ipath_physshadow[porttid + tid],
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ipath_release_user_pages(&p, 1);
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));
	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it. (see bug 4331)
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
				any++;
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			pkey =
				(u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}
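
/*
 * Illustrative register layout (derived from the shifts above; the
 * example values are made up): the four 16-bit pkeys pack into one
 * 64-bit register, low key first.  With
 * ipath_pkeys[] = { 0xffff, 0x8001, 0, 0 } the register holds
 * 0x000000008001ffff - bits 15:0 are key 0, bits 31:16 key 1,
 * bits 47:32 key 2, and bits 63:48 key 3.
 */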
/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @subport: the subport
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
			     int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port, subport);
	/* atomically clear receive enable port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.  This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
	return 0;
}
static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);
	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			} else
				ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
					   "but ref still %d\n", pd->port_port,
					   pd->port_pkeys[i], j,
					   atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}
/*
 * Initialize the port data with the receive buffer sizes
 * so this can be done while the master port is locked.
 * Otherwise, there is a race with a slave opening the port
 * and seeing these fields uninitialized.
 */
static void init_user_egr_sizes(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned egrperchunk, egrcnt, size;

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without wasting
	 * additional memory.
	 */
	size = 0x8000;
	egrperchunk = size / dd->ipath_rcvegrbufsize;
	egrcnt = dd->ipath_rcvegrcnt;
	pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
	pd->port_rcvegrbufs_perchunk = egrperchunk;
	pd->port_rcvegrbuf_size = size;
}
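
/*
 * Worked example (illustrative): with 32KB chunks (size = 0x8000) and
 * 2KB eager buffers, egrperchunk = 0x8000 / 2048 = 16 buffers per chunk,
 * so an egrcnt of 2048 needs (2048 + 16 - 1) / 16 = 128 chunks - the
 * usual round-up integer division.
 */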
/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance
 * This is the user port version
 *
 * Allocate the eager TID buffers and program them into infinipath
 * They are no longer completely contiguous, we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	chunk = pd->port_rcvegrbuf_chunks;
	egrperchunk = pd->port_rcvegrbufs_perchunk;
	size = pd->port_rcvegrbuf_size;
	pd->port_rcvegrbuf = kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf[0]),
					   GFP_KERNEL);
	if (!pd->port_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail;
	}
	pd->port_rcvegrbuf_phys =
		kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf_phys[0]),
			      GFP_KERNEL);
	if (!pd->port_rcvegrbuf_phys) {
		ret = -ENOMEM;
		goto bail_rcvegrbuf;
	}
	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
			gfp_flags);

		if (!pd->port_rcvegrbuf[e]) {
			ret = -ENOMEM;
			goto bail_rcvegrbuf_phys;
		}
	}

	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
		unsigned i;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->ipath_f_put_tid(dd, e + egroff +
					    (u64 __iomem *)
					    ((char __iomem *)
					     dd->ipath_kregbase +
					     dd->ipath_rcvegrbase),
					    RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched();	/* don't hog the cpu */
	}
	ret = 0;
	goto bail;

bail_rcvegrbuf_phys:
	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
		    pd->port_rcvegrbuf[e]; e++) {
		dma_free_coherent(&dd->pcidev->dev, size,
				  pd->port_rcvegrbuf[e],
				  pd->port_rcvegrbuf_phys[e]);
	}
	kfree(pd->port_rcvegrbuf_phys);
	pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(pd->port_rcvegrbuf);
	pd->port_rcvegrbuf = NULL;
bail:
	return ret;
}
/* common code for the mappings on dma_alloc_coherent mem */
static int ipath_mmap_mem(struct vm_area_struct *vma,
			  struct ipath_portdata *pd, unsigned len,
			  int write_ok, void *kvaddr, char *what)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
			 "bytes r%c failed: %d\n", what, pd->port_port,
			 pfn, len, write_ok?'w':'o', ret);
	else
		ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
			   "r%c\n", what, pd->port_port, pfn, len,
			   write_ok?'w':'o');
bail:
	return ret;
}
static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their port
	 * in the chip.
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}
static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible.  This prevents access to previous
	 * process data, and catches users who might try to read the i/o
	 * space due to a bug.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EFAULT;
		goto bail;
	}

	phys = dd->ipath_physaddr + piobufs;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for when
	 * not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EFAULT;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}
/*
 * ipath_file_vma_fault - handle a VMA page fault.
 */
static int ipath_file_vma_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct ipath_file_vm_ops = {
	.fault = ipath_file_vma_fault,
};
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct ipath_portdata *pd, unsigned subport)
{
	unsigned long len;
	struct ipath_devdata *dd;
	void *addr;
	size_t size;
	int ret = 0;

	/* If the port is not shared, all addresses should be physical */
	if (!pd->port_subport_cnt)
		goto bail;

	dd = pd->port_dd;
	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;

	/*
	 * Each process has all the subport uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
		addr = pd->subport_uregbase;
		size = PAGE_SIZE * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
		addr = pd->subport_rcvhdr_base;
		size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
		addr = pd->subport_rcvegrbuf;
		size *= pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
					PAGE_SIZE * subport)) {
		addr = pd->subport_uregbase + PAGE_SIZE * subport;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
					pd->port_rcvhdrq_size * subport)) {
		addr = pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport;
		size = pd->port_rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
					size * subport)) {
		addr = pd->subport_rcvegrbuf + size * subport;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "Can't map eager buffers as "
				 "writable (flags=%lx)\n", vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writeable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else
		goto bail;
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &ipath_file_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	ret = 1;

bail:
	return ret;
}
/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret;

	pd = port_fp(fp);
	if (!pd) {
		ret = -EINVAL;
		goto bail;
	}
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process.  The address referred to by vm_pgoff is the
	 * file offset passed via mmap().  For shared ports, this is the
	 * kernel vmalloc() address of the pages to share with the master.
	 * For non-shared or master ports, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr) {
		ret = -EINVAL;
		goto bail;
	}

	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start, dd->ipath_unit,
		   pd->port_port, subport_fp(fp));
	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
	if (!pd->port_subport_cnt) {
		/* port is not shared */
		piocnt = pd->port_piocnt;
		piobufs = pd->port_piobufs;
	} else if (!subport_fp(fp)) {
		/* caller is the master */
		piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
			 (pd->port_piocnt % pd->port_subport_cnt);
		piobufs = pd->port_piobufs +
			dd->ipath_palign * (pd->port_piocnt - piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		/* caller is a slave */
		piocnt = pd->port_piocnt / pd->port_subport_cnt;
		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
	}
	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     (void *) dd->ipath_pioavailregs_dma,
				     "pioavail registers");
	else if (pgaddr == pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; readonly except on HT (so have
		 * to allow writable mapping), multiple pages, contiguous
		 * from an i/o perspective.
		 */
		ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
				     pd->port_rcvhdrq,
				     "rcvhdrq");
	else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     pd->port_rcvhdrtail_kvaddr,
				     "rcvhdrq tail");
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}
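
/*
 * Illustrative user-side view (a sketch, not from the original source):
 * the offsets dispatched above are the values the driver reported in
 * ipath_base_info, so a user library would map, e.g., the PIO buffers
 * with something like
 *
 *	pio = mmap(NULL, piocnt * palign, PROT_WRITE, MAP_SHARED,
 *		   fd, kinfo.spi_piobufbase);
 *
 * and the offset-equality tests above pick the right backing object.
 */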
static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
{
	unsigned pollflag = 0;

	if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
	    pd->port_hdrqfull != pd->port_hdrqfull_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_hdrqfull_poll = pd->port_hdrqfull;
	}

	return pollflag;
}
static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
				      struct file *fp,
				      struct poll_table_struct *pt)
{
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	if (pd->port_urgent != pd->port_urgent_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_urgent_poll = pd->port_urgent;
	}

	if (!pollflag) {
		/* this saves a spin_lock/unlock in interrupt handler... */
		set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
		/* flush waiting flag so don't miss an event... */
		wmb();
		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}
static unsigned int ipath_poll_next(struct ipath_portdata *pd,
				    struct file *fp,
				    struct poll_table_struct *pt)
{
	u32 head;
	u32 tail;
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	if (pd->port_rcvhdrtail_kvaddr)
		tail = ipath_get_rcvhdrtail(pd);
	else
		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (head != tail)
		pollflag |= POLLIN | POLLRDNORM;
	else {
		/* this saves a spin_lock/unlock in interrupt handler */
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		/* flush waiting flag so we don't miss an event */
		wmb();

		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
			&dd->ipath_rcvctrl);

		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);

		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
			ipath_write_ureg(dd, ur_rcvhdrhead,
					 dd->ipath_rhdrhead_intr_off | head,
					 pd->port_port);

		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}
static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	unsigned pollflag;

	pd = port_fp(fp);
	if (!pd)
		pollflag = 0;
	else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
		pollflag = ipath_poll_urgent(pd, fp, pt);
	else
		pollflag = ipath_poll_next(pd, fp, pt);

	return pollflag;
}
static int ipath_supports_subports(int user_swmajor, int user_swminor)
{
	/* no subport implementation prior to software version 1.3 */
	return (user_swmajor > 1) || (user_swminor >= 3);
}

static int ipath_compatible_subports(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (IPATH_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (IPATH_USER_SWMAJOR == 1) {
		switch (IPATH_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subport implementation so cannot be compatible */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* >= 4 are compatible (or are expected to be) */
			return user_swminor >= 4;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}
static int init_subports(struct ipath_devdata *dd,
			 struct ipath_portdata *pd,
			 const struct ipath_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subports;
	size_t size;

	/*
	 * If the user is requesting zero subports,
	 * skip the subport allocation.
	 */
	if (uinfo->spu_subport_cnt <= 0)
		goto bail;

	/* Self-consistency check for ipath_compatible_subports() */
	if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
	    !ipath_compatible_subports(IPATH_USER_SWMAJOR,
				       IPATH_USER_SWMINOR)) {
		dev_info(&dd->pcidev->dev,
			 "Inconsistent ipath_compatible_subports()\n");
		goto bail;
	}

	/* Check for subport compatibility */
	if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
				       uinfo->spu_userversion & 0xffff)) {
		dev_info(&dd->pcidev->dev,
			 "Mismatched user version (%d.%d) and driver "
			 "version (%d.%d) while port sharing. Ensure "
			 "that driver and library are from the same "
			 "release.\n",
			 (int) (uinfo->spu_userversion >> 16),
			 (int) (uinfo->spu_userversion & 0xffff),
			 IPATH_USER_SWMAJOR,
			 IPATH_USER_SWMINOR);
		goto bail;
	}
	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
		ret = -EINVAL;
		goto bail;
	}
	num_subports = uinfo->spu_subport_cnt;
	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
	if (!pd->subport_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subports;
	pd->subport_rcvhdr_base = vzalloc(size);
	if (!pd->subport_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
					pd->port_rcvegrbuf_size *
					num_subports);
	if (!pd->subport_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	pd->port_subport_cnt = uinfo->spu_subport_cnt;
	pd->port_subport_id = uinfo->spu_subport_id;
	pd->active_slaves = 1;
	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
	goto bail;

bail_rhdr:
	vfree(pd->subport_rcvhdr_base);
bail_ureg:
	vfree(pd->subport_uregbase);
	pd->subport_uregbase = NULL;
bail:
	return ret;
}
static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_portdata *pd;
	int ret;

	if (!(pd = dd->ipath_pd[port])) {
		void *ptmp;

		pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);
		if (!pd || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(pd);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = pd;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!pd->port_cnt) {
		pd->userversion = uinfo->spu_userversion;
		init_user_egr_sizes(pd);
		if ((ret = init_subports(dd, pd, uinfo)) != 0)
			goto bail;
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = get_pid(task_pid(current));
		strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
		ret = -EBUSY;

bail:
	return ret;
}
static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}
static int find_free_port(int unit, struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 1; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp, uinfo);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}
*fp
,
1670 const struct ipath_user_info
*uinfo
)
1672 int ret
= 0, i
, prefunit
= -1, devmax
;
1673 int maxofallports
, npresent
, nup
;
1676 devmax
= ipath_count_units(&npresent
, &nup
, &maxofallports
);
1679 * This code is present to allow a knowledgeable person to
1680 * specify the layout of processes to processors before opening
1681 * this driver, and then we'll assign the process to the "closest"
1682 * InfiniPath chip to that processor (we assume reasonable connectivity,
1683 * for now). This code assumes that if affinity has been set
1684 * before this point, that at most one cpu is set; for now this
1685 * is reasonable. I check for both cpumask_empty() and cpumask_full(),
1686 * in case some kernel variant sets none of the bits when no
1687 * affinity is set. 2.6.11 and 12 kernels have all present
1688 * cpus set. Some day we'll have to fix it up further to handle
1689 * a cpu subset. This algorithm fails for two HT chips connected
1690 * in tunnel fashion. Eventually this needs real topology
1691 * information. There may be some issues with dual core numbering
1692 * as well. This needs more work prior to release.
1694 if (!cpumask_empty(tsk_cpus_allowed(current
)) &&
1695 !cpumask_full(tsk_cpus_allowed(current
))) {
1696 int ncpus
= num_online_cpus(), curcpu
= -1, nset
= 0;
1698 for_each_online_cpu(i
)
1699 if (cpumask_test_cpu(i
, tsk_cpus_allowed(current
))) {
1700 ipath_cdbg(PROC
, "%s[%u] affinity set for "
1701 "cpu %d/%d\n", current
->comm
,
1702 current
->pid
, i
, ncpus
);
1707 if (curcpu
!= -1 && nset
!= ncpus
) {
1709 prefunit
= curcpu
/ (ncpus
/ npresent
);
1710 ipath_cdbg(PROC
,"%s[%u] %d chips, %d cpus, "
1711 "%d cpus/chip, select unit %d\n",
1712 current
->comm
, current
->pid
,
1713 npresent
, ncpus
, ncpus
/ npresent
,
1720 * user ports start at 1, kernel port is 0
1721 * For now, we do round-robin access across all chips
1725 devmax
= prefunit
+ 1;
1727 for (i
= 1; i
< maxofallports
; i
++) {
1728 for (ndev
= prefunit
!= -1 ? prefunit
: 0; ndev
< devmax
;
1730 struct ipath_devdata
*dd
= ipath_lookup(ndev
);
1733 continue; /* can't use this unit */
1734 if (i
>= dd
->ipath_cfgports
)
1736 * Maxed out on users of this unit. Try
1740 ret
= try_alloc_port(dd
, i
, fp
, uinfo
);
1749 ipath_dbg("No ports available (none initialized "
1753 /* if started above 0, retry from 0 */
1755 "%s[%u] no ports on prefunit "
1756 "%d, clear and re-check\n",
1757 current
->comm
, current
->pid
,
1759 devmax
= ipath_count_units(NULL
, NULL
,
1765 ipath_dbg("No ports available\n");
1769 ipath_dbg("No boards found\n");
static int find_shared_port(struct file *fp,
			    const struct ipath_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = ipath_count_units(NULL, NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct ipath_devdata *dd = ipath_lookup(ndev);

		if (!usable(dd))
			continue;
		for (i = 1; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];

			/* Skip ports which are not yet open */
			if (!pd || !pd->port_cnt)
				continue;
			/* Skip port if it doesn't match the requested one */
			if (pd->port_subport_id != uinfo->spu_subport_id)
				continue;
			/* Verify the sharing process matches the master */
			if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
			    pd->userversion != uinfo->spu_userversion ||
			    pd->port_cnt >= pd->port_subport_cnt) {
				ret = -EINVAL;
				goto done;
			}
			port_fp(fp) = pd;
			subport_fp(fp) = pd->port_cnt++;
			pd->port_subpid[subport_fp(fp)] =
				get_pid(task_pid(current));
			tidcursor_fp(fp) = 0;
			pd->active_slaves |= 1 << subport_fp(fp);
			ipath_cdbg(PROC,
				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
				   current->comm, current->pid,
				   subport_fp(fp),
				   pd->port_comm, pid_nr(pd->port_pid),
				   dd->ipath_unit, pd->port_port);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
static int ipath_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in ipath_assign_port() */
	fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
	return fp->private_data ? 0 : -ENOMEM;
}
/* Get port early, so can set affinity prior to memory allocation */
static int ipath_assign_port(struct file *fp,
			     const struct ipath_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor;

	/* Check to be sure we haven't already initialized this file */
	if (port_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != IPATH_USER_SWMAJOR) {
		ipath_dbg("User major version %d not same as driver "
			  "major %d\n", uinfo->spu_userversion >> 16,
			  IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;
	if (swminor != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", swminor, IPATH_USER_SWMINOR);

	mutex_lock(&ipath_mutex);

	if (ipath_compatible_subports(swmajor, swminor) &&
	    uinfo->spu_subport_cnt &&
	    (ret = find_shared_port(fp, uinfo))) {
		if (ret > 0)
			ret = 0;
		goto done_chk_sdma;
	}

	i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)file_inode(fp)->i_rdev, i_minor);

	if (i_minor)
		ret = find_free_port(i_minor - 1, fp, uinfo);
	else
		ret = find_best_unit(fp, uinfo);

done_chk_sdma:
	if (!ret) {
		struct ipath_filedata *fd = fp->private_data;
		const struct ipath_portdata *pd = fd->pd;
		const struct ipath_devdata *dd = pd->port_dd;

		fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
						      dd->ipath_unit,
						      pd->port_port,
						      fd->subport);

		if (!fd->pq)
			ret = -ENOMEM;
	}

	mutex_unlock(&ipath_mutex);

done:
	return ret;
}
static int ipath_do_user_init(struct file *fp,
			      const struct ipath_user_info *uinfo)
{
	int ret;
	struct ipath_portdata *pd = port_fp(fp);
	struct ipath_devdata *dd;
	u32 head32;

	/* Subports don't need to initialize anything since master did it. */
	if (subport_fp(fp)) {
		ret = wait_event_interruptible(pd->port_wait,
			!test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
		goto done;
	}

	dd = pd->port_dd;

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
	/* some ports may get extra buffers, calculate that here */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_piocnt = dd->ipath_pbufsport + 1;
	else
		pd->port_piocnt = dd->ipath_pbufsport;

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_pio_base = (dd->ipath_pbufsport + 1)
			* (pd->port_port - 1);
	else
		pd->port_pio_base = dd->ipath_ports_extrabuf +
			dd->ipath_pbufsport * (pd->port_port - 1);
	pd->port_piobufs = dd->ipath_piobufbase +
		pd->port_pio_base * dd->ipath_palign;
	ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
		   " first pio %u\n", pd->port_port, pd->port_piobufs,
		   pd->port_piocnt, pd->port_pio_base);
	ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If pd->port_port > chip-supported,
	 * we need to do extra stuff here to handle by handling overflow
	 * through port 0, someday
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;

	/*
	 * set the eager head register for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	pd->port_lastrcvhdrqtail = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
		   pd->port_port, head32);
	pd->port_tidcursor = 0;	/* start at beginning after open */
	/* initialize poll variables... */
	pd->port_urgent = 0;
	pd->port_urgent_poll = 0;
	pd->port_hdrqfull_poll = pd->port_hdrqfull;

	/*
	 * Now enable the port for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1.  So for those chips, we turn it off and then back on.
	 * This will (very briefly) affect any other open ports, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
		&dd->ipath_rcvctrl);
	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl &
				 ~(1ULL << dd->ipath_r_tailupd_shift));
	}
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* Notify any waiting slaves */
	if (pd->port_subport_cnt) {
		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
		wake_up(&pd->port_wait);
	}

done:
	return ret;
}
/**
 * unlock_exptid - unlock any expected TID entries port still had in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		struct page *ps = dd->ipath_pageshadow[i];

		if (!ps)
			continue;

		dd->ipath_pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
			       PAGE_SIZE, PCI_DMA_FROMDEVICE);
		ipath_release_user_pages_on_close(&ps, 1);
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
			   pd->port_port, cnt);
	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}
static int ipath_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct ipath_filedata *fd;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned long flags;
	unsigned port;
	struct pid *pid;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	pd = fd->pd;
	if (!pd) {
		mutex_unlock(&ipath_mutex);
		goto bail;
	}

	dd = pd->port_dd;
	/* drain user sdma queue */
	ipath_user_sdma_queue_drain(dd, fd->pq);
	ipath_user_sdma_queue_destroy(fd->pq);

	if (--pd->port_cnt) {
		/*
		 * XXX If the master closes the port before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		pd->active_slaves &= ~(1 << fd->subport);
		put_pid(pd->port_subpid[fd->subport]);
		pd->port_subpid[fd->subport] = NULL;
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	/* early; no interrupt users after this */
	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	port = pd->port_port;
	dd->ipath_pd[port] = NULL;
	pid = pd->port_pid;
	pd->port_pid = NULL;
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv %u, pio already\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
			   pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}
	if (dd->ipath_kregbase) {
		/* atomically clear receive enable port and intr avail. */
		clear_bit(dd->ipath_r_portenable_shift + port,
			  &dd->ipath_rcvctrl);
		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
		/* and read back from chip to be sure that nothing
		 * else is in flight when we do the rest */
		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);
		/*
		 * be paranoid, and never write 0's to these, just use an
		 * unused part of the port 0 tail page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the port, it should stop "soon",
		 * even if a packet or two is already in flight after we
		 * disabled the port.
		 */
		ipath_write_kreg_port(dd,
			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
			dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			pd->port_port, dd->ipath_dummy_hdrq_phys);

		ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
		ipath_chg_pioavailkernel(dd, pd->port_pio_base,
					 pd->port_piocnt, 1);

		dd->ipath_f_clear_tids(dd, pd->port_port);

		if (dd->ipath_pageshadow)
			unlock_expected_tids(pd);
		ipath_stats.sps_ports--;
		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
			   pd->port_comm, pid_nr(pid),
			   dd->ipath_unit, port);
	}

	put_pid(pid);
	mutex_unlock(&ipath_mutex);
	ipath_free_pddata(dd, pd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}
static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;
	size_t sz;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;
	info.subport = subport;
	/* Don't return new fields if old library opened the port. */
	if (ipath_supports_subports(pd->userversion >> 16,
				    pd->userversion & 0xffff)) {
		/* Number of user ports available for this device. */
		info.num_ports = pd->port_dd->ipath_cfgports - 1;
		info.num_subports = pd->port_subport_cnt;
		sz = sizeof(info);
	} else
		sz = sizeof(info) - 2 * sizeof(u16);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

static int ipath_get_slave_info(struct ipath_portdata *pd,
				void __user *slave_mask_addr)
{
	int ret = 0;

	if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
		ret = -EFAULT;
	return ret;
}

static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
				   u32 __user *inflightp)
{
	const u32 val = ipath_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

static int ipath_sdma_get_complete(struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   u32 __user *completep)
{
	u32 val;
	int err;

	err = ipath_user_sdma_make_progress(dd, pq);
	if (err < 0)
		return err;

	val = ipath_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}

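/*
 * Editorial note: userspace pairs IPATH_CMD_SDMA_INFLIGHT and
 * IPATH_CMD_SDMA_COMPLETE (dispatched from ipath_write() below) to track
 * SDMA progress: the inflight counter advances as packets are queued, the
 * complete counter as the hardware finishes with them.  A minimal sketch
 * of reading the complete counter from userspace, assuming a hypothetical
 * open descriptor 'fd' and a caller-owned u32 'completed'; illustrative
 * only, not part of the driver:
 *
 *	struct ipath_cmd cmd = { .type = IPATH_CMD_SDMA_COMPLETE };
 *	cmd.cmd.sdma_complete = (__u64) (unsigned long) &completed;
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		perror("IPATH_CMD_SDMA_COMPLETE");
 */
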
static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
	case __IPATH_CMD_USER_INIT:
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	case __IPATH_CMD_SLAVE_INFO:
		copy = sizeof(cmd.cmd.slave_mask_addr);
		dest = &cmd.cmd.slave_mask_addr;
		src = &ucmd->cmd.slave_mask_addr;
		break;
	case IPATH_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;
	case IPATH_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}

		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}

		consumed += copy;
	}

	pd = port_fp(fp);
	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
	    cmd.type != IPATH_CMD_ASSIGN_PORT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;
	case __IPATH_CMD_USER_INIT:
		/* backwards compatibility, get port first */
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		/* and fall through to current version. */
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = ipath_get_base_info(
			fp, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd, subport_fp(fp),
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	case __IPATH_CMD_SLAVE_INFO:
		ret = ipath_get_slave_info(pd,
					   (void __user *) (unsigned long)
					   cmd.cmd.slave_mask_addr);
		break;
	case IPATH_CMD_PIOAVAILUPD:
		ipath_force_pio_avail_update(pd->port_dd);
		break;
	case IPATH_CMD_POLL_TYPE:
		pd->poll_type = cmd.cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		if (cmd.cmd.armlaunch_ctrl)
			ipath_enable_armlaunch(pd->port_dd);
		else
			ipath_disable_armlaunch(pd->port_dd);
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_inflight);
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		ret = ipath_sdma_get_complete(pd->port_dd,
					      user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_complete);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}

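/*
 * Editorial note: a minimal userspace sketch of the write() command ABI
 * handled above.  IPATH_CMD_PIOAVAILUPD carries no payload (copy == 0),
 * so writing just the type field is sufficient; note that ipath_write()
 * returns the number of bytes it consumed, which can be less than the
 * count passed in.  'fd' is a hypothetical descriptor for an opened
 * /dev/ipath* node; illustrative only, not part of the driver.
 *
 *	struct ipath_cmd cmd = { .type = IPATH_CMD_PIOAVAILUPD };
 *	ssize_t n = write(fd, &cmd, sizeof(cmd.type));
 *	// on success, n == sizeof(cmd.type)
 */
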
static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct ipath_filedata *fp = filp->private_data;
	struct ipath_portdata *pd = port_fp(filp);
	struct ipath_user_sdma_queue *pq = fp->pq;

	if (!iter_is_iovec(from) || !from->nr_segs)
		return -EINVAL;

	return ipath_user_sdma_writev(pd->port_dd, pq, from->iov,
				      from->nr_segs);
}

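/*
 * Editorial note: the writev()/write_iter path above is reserved for user
 * SDMA; the iovecs must describe packets in the format expected by
 * ipath_user_sdma_writev() (see ipath_user_sdma.c), not free-form data.
 * A hedged sketch, assuming hypothetical 'fd', 'hdr'/'hdrlen' and
 * 'payload'/'len' that already follow that format:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,     .iov_len = hdrlen },
 *		{ .iov_base = payload, .iov_len = len },
 *	};
 *	ssize_t n = writev(fd, iov, 2);
 */
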
static struct class *ipath_class;

static int init_cdev(int minor, char *name, const struct file_operations *fops,
		     struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	if (ret >= 0) {
		*cdevp = cdev;
		*devp = device;
	} else {
		*cdevp = NULL;
		*devp = NULL;
	}

	return ret;
}

int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
		    struct cdev **cdevp, struct device **devp)
{
	return init_cdev(minor, name, fops, cdevp, devp);
}

static void cleanup_cdev(struct cdev **cdevp,
			 struct device **devp)
{
	struct device *dev = *devp;

	if (dev) {
		device_unregister(dev);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

void ipath_cdev_cleanup(struct cdev **cdevp,
			struct device **devp)
{
	cleanup_cdev(cdevp, devp);
}

static struct cdev *wildcard_cdev;
static struct device *wildcard_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);

static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail_chrdev;
	}

	goto done;

bail_chrdev:
	unregister_chrdev_region(dev, IPATH_NMINORS);

done:
	return ret;
}

static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);

int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_user;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_user:
	user_cleanup();
bail:
	return ret;
}

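/*
 * Editorial note: with the wildcard minor 0 named "ipath" and per-unit
 * minors named "ipathN" above, udev typically creates /dev/ipath plus
 * /dev/ipath0, /dev/ipath1, ... (exact node paths depend on userspace
 * policy).  A hypothetical open of the wildcard node, which lets the
 * driver pick a port on any available unit at IPATH_CMD_ASSIGN_PORT time:
 *
 *	int fd = open("/dev/ipath", O_RDWR);
 */
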
void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}

bail:
	return;
}