/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>

#include "user_sdma.h"
#include "user_exp_rcv.h"

#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_file_write(struct file *, const char __user *,
			       size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *,
			    int, unsigned);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
			 struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
static int exp_tid_free(struct file *, struct hfi1_tid_info *);
static void unlock_exp_tids(struct hfi1_ctxtdata *);
static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write = hfi1_file_write,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};
static struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};
/*
 * Types of memories mapped into user processes' space
 */

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))

#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)
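/*
 * Worked example of the token encoding above (illustrative values, not
 * taken from the driver): for type 3, ctxt 5, subctxt 1 and an address
 * whose offset within its page is 0x80, HFI1_MMAP_TOKEN() yields
 *
 *	(0xdabbad00ULL << 32) | (3 << 24) | (5 << 16) | (1 << 12) | 0x080
 *	== 0xdabbad0003051080
 *
 * HFI1_MMAP_TOKEN_GET(CTXT, token) recovers the 5, and is_valid_mmap()
 * below simply checks that the upper 32 bits still hold the magic value.
 */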
static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	/* The real work is performed later in assign_ctxt() */
	fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
	if (fp->private_data) /* no cpu affinity by default */
		((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
}
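/*
 * Note: fp->private_data holds the per-open hfi1_filedata for the life of
 * the file descriptor.  It is only partially initialized here; the receive
 * context itself is attached later by assign_ctxt()/setup_ctxt() when user
 * space issues HFI1_CMD_ASSIGN_CTXT through hfi1_file_write().
 */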
static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
			       size_t count, loff_t *offset)
{
	const struct hfi1_cmd __user *ucmd;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_cmd cmd;
	struct hfi1_user_info uinfo;
	struct hfi1_tid_info tinfo;
	ssize_t consumed = 0, copy = 0, ret = 0;
	void *dest = NULL;
	__u64 user_val = 0;
	int uctxt_required = 1;
	int must_be_root = 0;

	if (count < sizeof(cmd)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct hfi1_cmd __user *)data;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd);

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		uctxt_required = 0;	/* assigned user context not required */
		copy = sizeof(uinfo);
		dest = &uinfo;
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
	case HFI1_CMD_CREDIT_UPD:
		copy = 0;
		break;
	case HFI1_CMD_TID_UPDATE:
	case HFI1_CMD_TID_FREE:
		copy = sizeof(tinfo);
		dest = &tinfo;
		break;
	case HFI1_CMD_USER_INFO:
	case HFI1_CMD_RECV_CTRL:
	case HFI1_CMD_POLL_TYPE:
	case HFI1_CMD_ACK_EVENT:
	case HFI1_CMD_CTXT_INFO:
	case HFI1_CMD_SET_PKEY:
	case HFI1_CMD_CTXT_RESET:
		copy = 0;
		user_val = cmd.addr;
		break;
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		uctxt_required = 0;	/* assigned user context not required */
		must_be_root = 1;	/* validate user */
		copy = 0;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	/* If the command comes with user data, copy it. */
	if (copy) {
		if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	/*
	 * Make sure there is a uctxt when needed.
	 */
	if (uctxt_required && !uctxt) {
		ret = -EINVAL;
		goto bail;
	}

	/* only root can do these operations */
	if (must_be_root && !capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto bail;
	}

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		ret = assign_ctxt(fp, &uinfo);
		if (ret < 0)
			goto bail;
		ret = setup_ctxt(fp);
		if (ret)
			goto bail;
		break;
	case HFI1_CMD_CTXT_INFO:
		ret = get_ctxt_info(fp, (void __user *)(unsigned long)
				    cmd.addr, cmd.len);
		break;
	case HFI1_CMD_USER_INFO:
		ret = get_base_info(fp, (void __user *)(unsigned long)
				    cmd.addr, cmd.len);
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
		break;
	case HFI1_CMD_CREDIT_UPD:
		if (uctxt && uctxt->sc)
			sc_return_credits(uctxt->sc);
		break;
	case HFI1_CMD_TID_UPDATE:
		ret = exp_tid_setup(fp, &tinfo);
		if (!ret) {
			unsigned long addr;
			/*
			 * Copy the number of tidlist entries we used
			 * and the length of the buffer we registered.
			 * These fields are adjacent in the structure so
			 * we can copy them at the same time.
			 */
			addr = (unsigned long)cmd.addr +
				offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt) +
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;
	case HFI1_CMD_TID_FREE:
		ret = exp_tid_free(fp, &tinfo);
		break;
	case HFI1_CMD_RECV_CTRL:
		ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
		break;
	case HFI1_CMD_POLL_TYPE:
		uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
		break;
	case HFI1_CMD_ACK_EVENT:
		ret = user_event_ack(uctxt, fd->subctxt, user_val);
		break;
	case HFI1_CMD_SET_PKEY:
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
		else
			ret = -EPERM;
		break;
	case HFI1_CMD_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc) {
			ret = -EINVAL;
			break;
		}
		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED)) {
			ret = -ENOLCK;
			break;
		}
		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN) {
				ret = -ENOLCK;
				break;
			}
			if (dd->flags & HFI1_FORCED_FREEZE) {
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				ret = -ENODEV;
				break;
			}
			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
				     uctxt->ctxt);
		} else
			ret = sc_restart(sc);
		if (!ret)
			sc_return_credits(sc);
		break;
	}
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		ret = handle_eprom_command(&cmd);
		break;
	}

	if (ret >= 0)
		ret = consumed;
bail:
	return ret;
}
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int ret = 0, done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq) {
		ret = -EIO;
		goto done;
	}

	if (!iter_is_iovec(from) || !dim) {
		ret = -EINVAL;
		goto done;
	}

	hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
		  fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
		ret = -ENOSPC;
		goto done;
	}

	while (dim) {
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret)
			goto done;
		dim -= count;
		done += count;
		reqs++;
	}
done:
	return ret ? ret : reqs;
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, pfn;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
			       PAGE_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memaddr = dd->cr_base[uctxt->numa_id].pa +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memaddr = uctxt->rcvhdrq_phys;
		memlen = uctxt->rcvhdrq_size;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer need to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			ret = remap_pfn_range(
				vma, addr,
				uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
				uctxt->egrbufs.buffers[i].len,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += uctxt->egrbufs.buffers[i].len;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)(dd->events +
					  ((uctxt->ctxt - dd->first_user_ctxt) *
					   HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memaddr = uctxt->rcvhdrqtailaddr_phys;
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
	if (vmf) {
		vma->vm_pgoff = pfn;
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
					 vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}
/*
 * Local (non-chip) user memory is not mapped right away but as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}
static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
	dd = uctxt->dd;
	mutex_lock(&hfi1_mutex);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
		uctxt->subpid[fdata->subctxt] = 0;
		mutex_unlock(&hfi1_mutex);
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
	/*
	 * Reset context integrity checks to default.
	 * (writes to CSRs probably belong in chip.c)
	 */
	write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
			hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
	sc_disable(uctxt->sc);
	uctxt->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	dd->rcd[uctxt->ctxt] = NULL;
	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
	uctxt->rcvnowait = 0;
	uctxt->pionowait = 0;
	uctxt->event_flags = 0;

	hfi1_clear_tids(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

	if (uctxt->tid_pg_list)
		unlock_exp_tids(uctxt);

	hfi1_stats.sps_ctxts--;
	dd->freectxts++;
	mutex_unlock(&hfi1_mutex);
	hfi1_free_ctxtdata(dd, uctxt);
done:
	kfree(fdata);
	return 0;
}
/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
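/*
 * kvirt_to_phys() is used by the STATUS branch of hfi1_file_mmap() above:
 * dd->status lives in vmalloc space, so its physical address has to be
 * recovered via vmalloc_to_page() before the page can be handed to
 * remap_pfn_range().  A return of 0 means the address was not backed by a
 * page at all.
 */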
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->userversion & 0xffff;

	if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
		alg = uinfo->hfi1_alg;

	mutex_lock(&hfi1_mutex);
	/* First, lets check if we need to setup a shared context? */
	if (uinfo->subctxt_cnt)
		ret = find_shared_ctxt(fp, uinfo);

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor - 1, alg);
	}
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}
/* return true if the device is available for general use */
static int usable_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;

	return driver_lstate(ppd) == IB_PORT_ACTIVE;
}
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno, unsigned alg)
{
	struct hfi1_devdata *dd = NULL;
	int ret = 0, devmax, npresent, nup, dev;

	devmax = hfi1_count_units(&npresent, &nup);

	if (devno >= 0) {
		dd = hfi1_lookup(devno);
		if (!dd)
			ret = -ENODEV;
		else if (!dd->freectxts)
			ret = -EBUSY;
	} else {
		struct hfi1_devdata *pdd;

		if (alg == HFI1_ALG_ACROSS) {
			unsigned free = 0U;

			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts &&
				    pdd->freectxts > free) {
					dd = pdd;
					free = pdd->freectxts;
				}
			}
		} else {
			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts) {
					dd = pdd;
					break;
				}
			}
		}
		if (!dd)
			ret = -EBUSY;
	}

	return ret ? ret : allocate_ctxt(fp, dd, uinfo);
}
static int find_shared_ctxt(struct file *fp,
			    const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *uctxt = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (memcmp(uctxt->uuid, uinfo->uuid,
				   sizeof(uctxt->uuid)) ||
			    uctxt->jkey != generate_jkey(current_uid()) ||
			    uctxt->subctxt_id != uinfo->subctxt_id ||
			    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
				continue;

			/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			fd->uctxt = uctxt;
			fd->subctxt = uctxt->cnt++;
			uctxt->subpid[fd->subctxt] = current->pid;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	int ret;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen.  It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt == dd->num_rcv_contexts)
		return -EBUSY;

	uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
	if (!uctxt) {
		dd_dev_err(dd,
			   "Unable to allocate ctxtdata memory, failing open\n");
		return -ENOMEM;
	}
	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
			     uctxt->numa_id);
	if (!uctxt->sc)
		return -ENOMEM;

	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		return ret;
	/*
	 * Setup shared context resources if the user-level has requested
	 * shared contexts and this is the 'master' process.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper master.
	 */
	if (uinfo->subctxt_cnt && !fd->subctxt) {
		ret = init_subctxts(uctxt, uinfo);
		/*
		 * On error, we don't need to disable and de-allocate the
		 * send context because it will be done during file close
		 */
		if (ret)
			return ret;
	}
	uctxt->userversion = uinfo->userversion;
	uctxt->pid = current->pid;
	uctxt->flags = HFI1_CAP_UGET(MASK);
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	INIT_LIST_HEAD(&uctxt->sdma_queues);
	spin_lock_init(&uctxt->sdma_qlock);
	hfi1_stats.sps_ctxts++;
	dd->freectxts--;
	fd->uctxt = uctxt;

	return 0;
}
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subctxts;

	num_subctxts = uinfo->subctxt_cnt;
	if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
		ret = -EINVAL;
		goto bail;
	}

	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	uctxt->active_slaves = 1;
	uctxt->redirect_seq_cnt = 1;
	set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
bail:
	return ret;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	unsigned num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}
	goto bail;
bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;
bail:
	return ret;
}
static int user_init(struct file *fp)
{
	int ret;
	unsigned int rcvctrl_ops = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/* make sure that the context has already been setup */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
		ret = -EFAULT;
		goto done;
	}

	/*
	 * Subctxts don't need to initialize anything since master
	 * has done it.
	 */
	if (fd->subctxt) {
		ret = wait_event_interruptible(uctxt->wait,
					       !test_bit(HFI1_CTXT_MASTER_UNINIT,
							 &uctxt->event_flags));
		goto done;
	}

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue. We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

	/* Notify any waiting slaves */
	if (uctxt->subctxt_cnt) {
		clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}
	ret = 0;

done:
	return ret;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;

	memset(&cinfo, 0, sizeof(cinfo));
	ret = hfi1_get_base_kinfo(uctxt, &cinfo);
	if (ret < 0)
		goto done;
	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
		ret = -EFAULT;
done:
	return ret;
}
static int setup_ctxt(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	/*
	 * Context should be set up only once (including allocation and
	 * programming of eager buffers). This is done if context sharing
	 * is not requested or by the master process.
	 */
	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		ret = hfi1_init_ctxt(uctxt->sc);
		if (ret)
			goto done;

		/* Now allocate the RcvHdr queue and eager buffers. */
		ret = hfi1_create_rcvhdrq(dd, uctxt);
		if (ret)
			goto done;
		ret = hfi1_setup_eagerbufs(uctxt);
		if (ret)
			goto done;
		if (uctxt->subctxt_cnt && !fd->subctxt) {
			ret = setup_subctxt(uctxt);
			if (ret)
				goto done;
		}
		/* Setup Expected Rcv memories */
		uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
					     sizeof(struct page **));
		if (!uctxt->tid_pg_list) {
			ret = -ENOMEM;
			goto done;
		}
		uctxt->physshadow = vzalloc(uctxt->expected_count *
					    sizeof(*uctxt->physshadow));
		if (!uctxt->physshadow) {
			ret = -ENOMEM;
			goto done;
		}
		/* allocate expected TID map and initialize the cursor */
		atomic_set(&uctxt->tidcursor, 0);
		uctxt->numtidgroups = uctxt->expected_count /
			dd->rcv_entries.group_size;
		uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
			!!(uctxt->numtidgroups % BITS_PER_LONG);
		uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
						sizeof(*uctxt->tidusemap),
						GFP_KERNEL, uctxt->numa_id);
		if (!uctxt->tidusemap) {
			ret = -ENOMEM;
			goto done;
		}
		/*
		 * In case that the number of groups is not a multiple of
		 * 64 (the number of groups in a tidusemap element), mark
		 * the extra ones as used. This will effectively make them
		 * permanently used and should never be assigned. Otherwise,
		 * the code which checks how many free groups we have will
		 * get completely confused about the state of the bits.
		 */
		if (uctxt->numtidgroups % BITS_PER_LONG)
			uctxt->tidusemap[uctxt->tidmapcnt - 1] =
				~((1ULL << (uctxt->numtidgroups %
					    BITS_PER_LONG)) - 1);
		trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0,
				       uctxt->tidusemap, uctxt->tidmapcnt);
	}
	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
	if (ret)
		goto done;

	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
	return ret;
}
static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	ssize_t sz;
	unsigned offset;
	int ret = 0;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt);

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].phys);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt, offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt, dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}
	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
	if (copy_to_user(ubase, &binfo, sz))
		ret = -EFAULT;
	return ret;
}
static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
		pollflag = 0;
	} else
		pollflag = POLLIN | POLLRDNORM;
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
/*
 * Find all user contexts in use, and set the specified bit in their
 * event_flags.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	if (!dd->events) {
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = dd->rcd[ctxt];
		if (uctxt) {
			unsigned long *evs = dd->events +
				(uctxt->ctxt - dd->first_user_ctxt) *
				HFI1_MAX_SHARED_CTXTS;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
	return ret;
}
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}
/*
 * clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			    HFI1_MAX_SHARED_CTXTS) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}
#define num_user_pages(vaddr, len)					\
	(1 + (((((unsigned long)(vaddr) +				\
		 (unsigned long)(len) - 1) & PAGE_MASK) -		\
	       ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
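/*
 * num_user_pages() counts how many pages a user buffer touches, not how
 * many pages long it is.  With 4K pages, for example (illustrative values):
 *	vaddr = 0x1010, len = 0x2000  ->  last byte at 0x300f,
 *	(0x3000 - 0x1000) >> 12 == 2, plus 1  ->  3 pages,
 * whereas a page-aligned buffer of exactly PAGE_SIZE yields 1.
 */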
/**
 * tzcnt - count the number of trailing zeros in a 64bit value
 * @value: the value to be examined
 *
 * Returns the number of trailing least significant zeros in the
 * input value. If the value is zero, return the number of
 * bits of the value.
 */
static inline u8 tzcnt(u64 value)
{
	return value ? __builtin_ctzl(value) : sizeof(value) * 8;
}
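/*
 * E.g. tzcnt(0x8) == 3 (0b1000 has three trailing zeros) and
 * tzcnt(0) == 64, i.e. "all bits are clear".
 */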
static inline unsigned num_free_groups(unsigned long map, u16 *start)
{
	unsigned free;
	u16 bitidx = *start;

	if (bitidx >= BITS_PER_LONG)
		return 0;
	/* "Turn off" any bits set before our bit index */
	map &= ~((1ULL << bitidx) - 1);
	free = tzcnt(map) - bitidx;
	while (!free && bitidx < BITS_PER_LONG) {
		/* Zero out the last set bit so we look at the rest */
		map &= ~(1ULL << bitidx);
		/*
		 * Account for the previously checked bits and advance
		 * the bit index. We don't have to check for bitidx
		 * getting bigger than BITS_PER_LONG here as it would
		 * mean extra instructions that we don't need. If it
		 * did happen, it would push free to a negative value
		 * which will break the loop.
		 */
		free = tzcnt(map) - ++bitidx;
	}
	*start = bitidx;
	return free;
}
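/*
 * Worked example (illustrative values): with map = 0x30 (groups 4 and 5 in
 * use) and *start = 0, tzcnt(0x30) == 4, so 4 free groups are reported
 * starting at bit 0.  With *start = 4 the loop clears bits 4 and 5, advances
 * bitidx to 6 and reports 64 - 6 = 58 free groups starting at bit 6, with
 * *start updated to 6 for the caller.
 */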
static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned tid, mapped = 0, npages, ngroups, exp_groups,
		tidpairs = uctxt->expected_count / 2;
	struct page **pages;
	unsigned long vaddr, tidmap[uctxt->tidmapcnt];
	dma_addr_t *phys;
	u32 tidlist[tidpairs], pairidx = 0, tidcursor;
	u16 useidx, idx, bitidx, tidcnt = 0;

	vaddr = tinfo->vaddr;

	if (offset_in_page(vaddr)) {
		ret = -EINVAL;
		goto bail;
	}

	npages = num_user_pages(vaddr, tinfo->length);
	if (!npages) {
		ret = -EINVAL;
		goto bail;
	}
	if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		ret = -EFAULT;
		goto bail;
	}

	memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
	memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);

	exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
	/* which group set do we look at first? */
	tidcursor = atomic_read(&uctxt->tidcursor);
	useidx = (tidcursor >> 16) & 0xffff;
	bitidx = tidcursor & 0xffff;

	/*
	 * Keep going until we've mapped all pages or we've exhausted all
	 * RcvArray entries.
	 * This iterates over the number of tidmaps + 1
	 * (idx <= uctxt->tidmapcnt) so we check the bitmap which we
	 * started from one more time for any free bits before the
	 * starting point bit.
	 */
	for (mapped = 0, idx = 0;
	     mapped < npages && idx <= uctxt->tidmapcnt;) {
		u64 i, offset = 0;
		unsigned free, pinned, pmapped = 0, bits_used;
		u16 grp;

		/*
		 * "Reserve" the needed group bits under lock so other
		 * processes can't step in the middle of it. Once
		 * reserved, we don't need the lock anymore since we
		 * are guaranteed the groups.
		 */
		spin_lock(&uctxt->exp_lock);
		if (uctxt->tidusemap[useidx] == -1ULL ||
		    bitidx >= BITS_PER_LONG) {
			/* no free groups in the set, use the next */
			useidx = (useidx + 1) % uctxt->tidmapcnt;
			idx++;
			bitidx = 0;
			spin_unlock(&uctxt->exp_lock);
			continue;
		}
		ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
			!!((npages - mapped) % dd->rcv_entries.group_size);

		/*
		 * If we've gotten here, the current set of groups does have
		 * one or more free groups.
		 */
		free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
		if (!free) {
			/*
			 * Despite the check above, free could still come back
			 * as 0 because we don't check the entire bitmap but
			 * we start from bitidx.
			 */
			spin_unlock(&uctxt->exp_lock);
			continue;
		}
		bits_used = min(free, ngroups);
		tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
		uctxt->tidusemap[useidx] |= tidmap[useidx];
		spin_unlock(&uctxt->exp_lock);

		/*
		 * At this point, we know where in the map we have free bits.
		 * properly offset into the various "shadow" arrays and compute
		 * the RcvArray entry index.
		 */
		offset = ((useidx * BITS_PER_LONG) + bitidx) *
			dd->rcv_entries.group_size;
		pages = uctxt->tid_pg_list + offset;
		phys = uctxt->physshadow + offset;
		tid = uctxt->expected_base + offset;

		/* Calculate how many pages we can pin based on free bits */
		pinned = min((bits_used * dd->rcv_entries.group_size),
			     (npages - mapped));
		/*
		 * Now that we know how many free RcvArray entries we have,
		 * we can pin that many user pages.
		 */
		ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
					  pinned, pages);
		if (ret) {
			/*
			 * We can't continue because the pages array won't be
			 * initialized. This should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves.
			 */
			dd_dev_info(dd,
				    "Failed to lock addr %p, %u pages: errno %d\n",
				    (void *) vaddr, pinned, -ret);
			/*
			 * Let go of the bits that we reserved since we are not
			 * going to use them.
			 */
			spin_lock(&uctxt->exp_lock);
			uctxt->tidusemap[useidx] &=
				~(((1ULL << bits_used) - 1) << bitidx);
			spin_unlock(&uctxt->exp_lock);
			goto done;
		}
		/*
		 * How many groups do we need based on how many pages we have
		 * pinned?
		 */
		ngroups = (pinned / dd->rcv_entries.group_size) +
			!!(pinned % dd->rcv_entries.group_size);
		/*
		 * Keep programming RcvArray entries for all the <ngroups> free
		 * groups.
		 */
		for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
			unsigned j;
			u32 pair_size = 0, tidsize;
			/*
			 * This inner loop will program an entire group or the
			 * array of pinned pages (whichever limit is hit
			 * first).
			 */
			for (j = 0; j < dd->rcv_entries.group_size &&
				     pmapped < pinned; j++, pmapped++, tid++) {
				tidsize = PAGE_SIZE;
				phys[pmapped] = hfi1_map_page(dd->pcidev,
							      pages[pmapped], 0,
							      tidsize, PCI_DMA_FROMDEVICE);
				trace_hfi1_exp_rcv_set(uctxt->ctxt,
						       fd->subctxt, tid, vaddr,
						       phys[pmapped],
						       pages[pmapped]);
				/*
				 * Each RcvArray entry is programmed with one
				 * page * worth of memory. This will handle
				 * the 8K MTU as well as anything smaller
				 * due to the fact that both entries in the
				 * RcvTidPair are programmed with a page.
				 * PSM currently does not handle anything
				 * bigger than 8K MTU, so should we even worry
				 * about 10K here?
				 */
				hfi1_put_tid(dd, tid, PT_EXPECTED,
					     phys[pmapped],
					     ilog2(tidsize >> PAGE_SHIFT) + 1);
				pair_size += tidsize >> PAGE_SHIFT;
				EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
				if (!(tid % 2)) {
					tidlist[pairidx] |= EXP_TID_SET(IDX,
						(tid - uctxt->expected_base)
						/ 2);
					tidlist[pairidx] |=
						EXP_TID_SET(CTRL, 1);
					tidcnt++;
				} else {
					tidlist[pairidx] |=
						EXP_TID_SET(CTRL, 2);
					pair_size = 0;
					pairidx++;
				}
			}
			/*
			 * We've programmed the entire group (or as much of the
			 * group as we'll use). Now, it's time to push it out...
			 */
			flush_wc();
		}
		mapped += pinned;
		atomic_set(&uctxt->tidcursor,
			   (((useidx & 0xffffff) << 16) |
			    ((bitidx + bits_used) & 0xffffff)));
	}
	trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0, uctxt->tidusemap,
			       uctxt->tidmapcnt);

done:
	/* If we've mapped anything, copy relevant info to user */
	if (mapped) {
		if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
				 tidlist, sizeof(tidlist[0]) * tidcnt)) {
			ret = -EFAULT;
			goto bail;
		}
		/* copy TID info to user */
		if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
				 tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
			ret = -EFAULT;
	}
bail:
	/*
	 * Calculate mapped length. New Exp TID protocol does not "unwind" and
	 * report an error if it can't map the entire buffer. It just reports
	 * the length that was mapped.
	 */
	tinfo->length = mapped * PAGE_SIZE;
	tinfo->tidcnt = tidcnt;
	return ret;
}
*fp
, struct hfi1_tid_info
*tinfo
)
1787 struct hfi1_filedata
*fd
= fp
->private_data
;
1788 struct hfi1_ctxtdata
*uctxt
= fd
->uctxt
;
1789 struct hfi1_devdata
*dd
= uctxt
->dd
;
1790 unsigned long tidmap
[uctxt
->tidmapcnt
];
1791 struct page
**pages
;
1793 u16 idx
, bitidx
, tid
;
1796 if (copy_from_user(&tidmap
, (void __user
*)(unsigned long)
1798 sizeof(tidmap
[0]) * uctxt
->tidmapcnt
)) {
1802 for (idx
= 0; idx
< uctxt
->tidmapcnt
; idx
++) {
1809 while ((bitidx
= tzcnt(map
)) < BITS_PER_LONG
) {
1811 struct page
*pshadow
[dd
->rcv_entries
.group_size
];
1812 unsigned offset
= ((idx
* BITS_PER_LONG
) + bitidx
) *
1813 dd
->rcv_entries
.group_size
;
1815 pages
= uctxt
->tid_pg_list
+ offset
;
1816 phys
= uctxt
->physshadow
+ offset
;
1817 tid
= uctxt
->expected_base
+ offset
;
1818 for (i
= 0; i
< dd
->rcv_entries
.group_size
;
1821 hfi1_put_tid(dd
, tid
, PT_INVALID
,
1823 trace_hfi1_exp_rcv_free(uctxt
->ctxt
,
1827 pci_unmap_page(dd
->pcidev
, phys
[i
],
1828 PAGE_SIZE
, PCI_DMA_FROMDEVICE
);
1829 pshadow
[pcount
] = pages
[i
];
1836 hfi1_release_user_pages(pshadow
, pcount
);
1837 clear_bit(bitidx
, &uctxt
->tidusemap
[idx
]);
1838 map
&= ~(1ULL<<bitidx
);
1841 trace_hfi1_exp_tid_map(uctxt
->ctxt
, fd
->subctxt
, 1, uctxt
->tidusemap
,
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned tid;

	dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
		    uctxt->ctxt);
	for (tid = 0; tid < uctxt->expected_count; tid++) {
		struct page *p = uctxt->tid_pg_list[tid];
		dma_addr_t phys;

		if (!p)
			continue;

		phys = uctxt->physshadow[tid];
		uctxt->physshadow[tid] = 0;
		uctxt->tid_pg_list[tid] = NULL;
		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
		hfi1_release_user_pages(&p, 1);
	}
}
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
			 u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
	return ret;
}
static int ui_open(struct inode *inode, struct file *filp)
{
	struct hfi1_devdata *dd;

	dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
	filp->private_data = dd; /* for other methods */
	return 0;
}
static int ui_release(struct inode *inode, struct file *filp)
{
	/* nothing to do */
	return 0;
}
static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
	struct hfi1_devdata *dd = filp->private_data;

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += filp->f_pos;
		break;
	case SEEK_END:
		offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
			offset;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
		return -EINVAL;

	filp->f_pos = offset;

	return filp->f_pos;
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
		       loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base = dd->kregbase;
	unsigned long total, csr_off,
		barlen = (dd->kregend - dd->kregbase);
	u64 data;

	/* only read 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* destination buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
		return -EINVAL;
	/* only set the base if we are not starting past the BAR */
	if (*f_pos < barlen)
		base += *f_pos;
	csr_off = *f_pos;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		/* accessing LCB CSRs requires more checks */
		if (is_lcb_offset(csr_off)) {
			if (read_lcb_csr(dd, csr_off, (u64 *)&data))
				break; /* failed */
		}
		/*
		 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
		 * false parity error.  Avoid the whole issue by not reading
		 * them.  These registers are defined as having a read value
		 * of 0.
		 */
		else if (csr_off == ASIC_GPIO_CLEAR
				|| csr_off == ASIC_GPIO_FORCE
				|| csr_off == ASIC_QSFP1_CLEAR
				|| csr_off == ASIC_QSFP1_FORCE
				|| csr_off == ASIC_QSFP2_CLEAR
				|| csr_off == ASIC_QSFP2_FORCE)
			data = 0;
		else if (csr_off >= barlen) {
			/*
			 * read_8051_data can read more than just 8 bytes at
			 * a time. However, folding this into the loop and
			 * handling the reads in 8 byte increments allows us
			 * to smoothly transition from chip memory to 8051
			 * memory.
			 */
			if (read_8051_data(dd,
					   (u32)(csr_off - barlen),
					   sizeof(data), &data))
				break; /* failed */
		} else
			data = readq(base + total);
		if (put_user(data, (unsigned long __user *)(buf + total)))
			break;
	}
	*f_pos += total;
	return total;
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_write(struct file *filp, const char __user *buf,
			size_t count, loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base;
	unsigned long total, data, csr_off;
	int in_lcb;

	/* only write 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* source buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > dd->kregend - dd->kregbase)
		return -EINVAL;

	base = (void __iomem *)dd->kregbase + *f_pos;
	csr_off = *f_pos;
	in_lcb = 0;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		if (get_user(data, (unsigned long __user *)(buf + total)))
			break;
		/* accessing LCB CSRs requires a special procedure */
		if (is_lcb_offset(csr_off)) {
			if (!in_lcb) {
				int ret = acquire_lcb_access(dd, 1);

				if (ret)
					break;
				in_lcb = 1;
			}
		} else {
			if (in_lcb) {
				release_lcb_access(dd, 1);
				in_lcb = 0;
			}
		}
		writeq(data, base + total);
	}
	if (in_lcb)
		release_lcb_access(dd, 1);
	*f_pos += total;
	return total;
}
= {
2056 .owner
= THIS_MODULE
,
2061 .release
= ui_release
,
#define UI_OFFSET 192	/* device minor offset for UI devices */
static int create_ui = 1;

static struct cdev wildcard_cdev;
static struct device *wildcard_device;

static atomic_t user_count = ATOMIC_INIT(0);
static void user_remove(struct hfi1_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
	hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}
static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
				     &wildcard_cdev, &wildcard_device,
				     true);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true);
	if (ret)
		goto done;

	if (create_ui) {
		snprintf(name, sizeof(name),
			 "%s_ui%d", class_name(), dd->unit);
		ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
				     &dd->ui_cdev, &dd->ui_device,
				     false);
		if (ret)
			goto done;
	}

	return 0;
done:
	user_remove(dd);
	return ret;
}
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	int r, ret;

	r = user_add(dd);
	ret = hfi1_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}

/*
 * Remove per-unit files in /dev
 * void, core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
	hfi1_diag_remove(dd);
}