drivers/staging/rdma/hfi1/file_ops.c @ 1bdc073fa881a095ce5d238c119508ba8f5d1cdf (mirror_ubuntu-artful-kernel.git)
1 /*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50 #include <linux/poll.h>
51 #include <linux/cdev.h>
52 #include <linux/vmalloc.h>
53 #include <linux/io.h>
54
55 #include "hfi.h"
56 #include "pio.h"
57 #include "device.h"
58 #include "common.h"
59 #include "trace.h"
60 #include "user_sdma.h"
61 #include "user_exp_rcv.h"
62 #include "eprom.h"
63
64 #undef pr_fmt
65 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
66
67 #define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
68
69 /*
70 * File operation functions
71 */
72 static int hfi1_file_open(struct inode *, struct file *);
73 static int hfi1_file_close(struct inode *, struct file *);
74 static ssize_t hfi1_file_write(struct file *, const char __user *,
75 size_t, loff_t *);
76 static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
77 static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
78 static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
79
80 static u64 kvirt_to_phys(void *);
81 static int assign_ctxt(struct file *, struct hfi1_user_info *);
82 static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
83 static int user_init(struct file *);
84 static int get_ctxt_info(struct file *, void __user *, __u32);
85 static int get_base_info(struct file *, void __user *, __u32);
86 static int setup_ctxt(struct file *);
87 static int setup_subctxt(struct hfi1_ctxtdata *);
88 static int get_user_context(struct file *, struct hfi1_user_info *,
89 int, unsigned);
90 static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
91 static int allocate_ctxt(struct file *, struct hfi1_devdata *,
92 struct hfi1_user_info *);
93 static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
94 static unsigned int poll_next(struct file *, struct poll_table_struct *);
95 static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
96 static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
97 static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
98 static int vma_fault(struct vm_area_struct *, struct vm_fault *);
99 static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
100 static int exp_tid_free(struct file *, struct hfi1_tid_info *);
101 static void unlock_exp_tids(struct hfi1_ctxtdata *);
102
103 static const struct file_operations hfi1_file_ops = {
104 .owner = THIS_MODULE,
105 .write = hfi1_file_write,
106 .write_iter = hfi1_write_iter,
107 .open = hfi1_file_open,
108 .release = hfi1_file_close,
109 .poll = hfi1_poll,
110 .mmap = hfi1_file_mmap,
111 .llseek = noop_llseek,
112 };
113
114 static struct vm_operations_struct vm_ops = {
115 .fault = vma_fault,
116 };
117
118 /*
119 * Types of memory mapped into user processes' space
120 */
121 enum mmap_types {
122 PIO_BUFS = 1,
123 PIO_BUFS_SOP,
124 PIO_CRED,
125 RCV_HDRQ,
126 RCV_EGRBUF,
127 UREGS,
128 EVENTS,
129 STATUS,
130 RTAIL,
131 SUBCTXT_UREGS,
132 SUBCTXT_RCV_HDRQ,
133 SUBCTXT_EGRBUF,
134 SDMA_COMP
135 };
136
137 /*
138 * Masks and offsets defining the mmap tokens
139 */
140 #define HFI1_MMAP_OFFSET_MASK 0xfffULL
141 #define HFI1_MMAP_OFFSET_SHIFT 0
142 #define HFI1_MMAP_SUBCTXT_MASK 0xfULL
143 #define HFI1_MMAP_SUBCTXT_SHIFT 12
144 #define HFI1_MMAP_CTXT_MASK 0xffULL
145 #define HFI1_MMAP_CTXT_SHIFT 16
146 #define HFI1_MMAP_TYPE_MASK 0xfULL
147 #define HFI1_MMAP_TYPE_SHIFT 24
148 #define HFI1_MMAP_MAGIC_MASK 0xffffffffULL
149 #define HFI1_MMAP_MAGIC_SHIFT 32
150
151 #define HFI1_MMAP_MAGIC 0xdabbad00
152
153 #define HFI1_MMAP_TOKEN_SET(field, val) \
154 (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
155 #define HFI1_MMAP_TOKEN_GET(field, token) \
156 (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
157 #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \
158 (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
159 HFI1_MMAP_TOKEN_SET(TYPE, type) | \
160 HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
161 HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
162 HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
163
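/*
 * Added note (not in the original source): with the masks and shifts
 * above, a token packs into a 64-bit value as
 *
 *   bits 63..32  MAGIC   (0xdabbad00)
 *   bits 27..24  TYPE    (enum mmap_types)
 *   bits 23..16  CTXT    (context number)
 *   bits 15..12  SUBCTXT (sub-context number)
 *   bits 11..0   OFFSET  (offset_in_page(addr))
 *
 * For example, HFI1_MMAP_TOKEN(UREGS, 3, 0, 0) evaluates to
 * 0xdabbad0006030000 (magic | type 6 | context 3), using UREGS == 6
 * from the enum above.
 */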
164 #define dbg(fmt, ...) \
165 pr_info(fmt, ##__VA_ARGS__)
166
167
168 static inline int is_valid_mmap(u64 token)
169 {
170 return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
171 }
172
173 static int hfi1_file_open(struct inode *inode, struct file *fp)
174 {
175 /* The real work is performed later in assign_ctxt() */
176 fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
177 if (fp->private_data) /* no cpu affinity by default */
178 ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
179 return fp->private_data ? 0 : -ENOMEM;
180 }
181
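/*
 * Illustrative userspace use of the write() command interface below
 * (a sketch, not part of the original source; it assumes the uapi
 * struct hfi1_cmd with the type/len/addr members referenced in
 * hfi1_file_write()):
 *
 *   struct hfi1_ctxt_info cinfo;
 *   struct hfi1_cmd cmd = {
 *           .type = HFI1_CMD_CTXT_INFO,
 *           .len  = sizeof(cinfo),
 *           .addr = (__u64)&cinfo,
 *   };
 *   write(fd, &cmd, sizeof(cmd));
 *
 * The driver copies the command header from the write buffer and, for
 * commands that carry user data, copies that data from cmd.addr. Most
 * commands require a context to have been assigned first via
 * HFI1_CMD_ASSIGN_CTXT.
 */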
182 static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
183 size_t count, loff_t *offset)
184 {
185 const struct hfi1_cmd __user *ucmd;
186 struct hfi1_filedata *fd = fp->private_data;
187 struct hfi1_ctxtdata *uctxt = fd->uctxt;
188 struct hfi1_cmd cmd;
189 struct hfi1_user_info uinfo;
190 struct hfi1_tid_info tinfo;
191 ssize_t consumed = 0, copy = 0, ret = 0;
192 void *dest = NULL;
193 __u64 user_val = 0;
194 int uctxt_required = 1;
195 int must_be_root = 0;
196
197 if (count < sizeof(cmd)) {
198 ret = -EINVAL;
199 goto bail;
200 }
201
202 ucmd = (const struct hfi1_cmd __user *)data;
203 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
204 ret = -EFAULT;
205 goto bail;
206 }
207
208 consumed = sizeof(cmd);
209
210 switch (cmd.type) {
211 case HFI1_CMD_ASSIGN_CTXT:
212 uctxt_required = 0; /* assigned user context not required */
213 copy = sizeof(uinfo);
214 dest = &uinfo;
215 break;
216 case HFI1_CMD_SDMA_STATUS_UPD:
217 case HFI1_CMD_CREDIT_UPD:
218 copy = 0;
219 break;
220 case HFI1_CMD_TID_UPDATE:
221 case HFI1_CMD_TID_FREE:
222 copy = sizeof(tinfo);
223 dest = &tinfo;
224 break;
225 case HFI1_CMD_USER_INFO:
226 case HFI1_CMD_RECV_CTRL:
227 case HFI1_CMD_POLL_TYPE:
228 case HFI1_CMD_ACK_EVENT:
229 case HFI1_CMD_CTXT_INFO:
230 case HFI1_CMD_SET_PKEY:
231 case HFI1_CMD_CTXT_RESET:
232 copy = 0;
233 user_val = cmd.addr;
234 break;
235 case HFI1_CMD_EP_INFO:
236 case HFI1_CMD_EP_ERASE_CHIP:
237 case HFI1_CMD_EP_ERASE_RANGE:
238 case HFI1_CMD_EP_READ_RANGE:
239 case HFI1_CMD_EP_WRITE_RANGE:
240 uctxt_required = 0; /* assigned user context not required */
241 must_be_root = 1; /* validate user */
242 copy = 0;
243 break;
244 default:
245 ret = -EINVAL;
246 goto bail;
247 }
248
249 /* If the command comes with user data, copy it. */
250 if (copy) {
251 if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
252 ret = -EFAULT;
253 goto bail;
254 }
255 consumed += copy;
256 }
257
258 /*
259 * Make sure there is a uctxt when needed.
260 */
261 if (uctxt_required && !uctxt) {
262 ret = -EINVAL;
263 goto bail;
264 }
265
266 /* only root can do these operations */
267 if (must_be_root && !capable(CAP_SYS_ADMIN)) {
268 ret = -EPERM;
269 goto bail;
270 }
271
272 switch (cmd.type) {
273 case HFI1_CMD_ASSIGN_CTXT:
274 ret = assign_ctxt(fp, &uinfo);
275 if (ret < 0)
276 goto bail;
277 ret = setup_ctxt(fp);
278 if (ret)
279 goto bail;
280 ret = user_init(fp);
281 break;
282 case HFI1_CMD_CTXT_INFO:
283 ret = get_ctxt_info(fp, (void __user *)(unsigned long)
284 user_val, cmd.len);
285 break;
286 case HFI1_CMD_USER_INFO:
287 ret = get_base_info(fp, (void __user *)(unsigned long)
288 user_val, cmd.len);
289 break;
290 case HFI1_CMD_SDMA_STATUS_UPD:
291 break;
292 case HFI1_CMD_CREDIT_UPD:
293 if (uctxt && uctxt->sc)
294 sc_return_credits(uctxt->sc);
295 break;
296 case HFI1_CMD_TID_UPDATE:
297 ret = exp_tid_setup(fp, &tinfo);
298 if (!ret) {
299 unsigned long addr;
300 /*
301 * Copy the number of tidlist entries we used
302 * and the length of the buffer we registered.
303 * These fields are adjacent in the structure so
304 * we can copy them at the same time.
305 */
306 addr = (unsigned long)cmd.addr +
307 offsetof(struct hfi1_tid_info, tidcnt);
308 if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
309 sizeof(tinfo.tidcnt) +
310 sizeof(tinfo.length)))
311 ret = -EFAULT;
312 }
313 break;
314 case HFI1_CMD_TID_FREE:
315 ret = exp_tid_free(fp, &tinfo);
316 break;
317 case HFI1_CMD_RECV_CTRL:
318 ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
319 break;
320 case HFI1_CMD_POLL_TYPE:
321 uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
322 break;
323 case HFI1_CMD_ACK_EVENT:
324 ret = user_event_ack(uctxt, fd->subctxt, user_val);
325 break;
326 case HFI1_CMD_SET_PKEY:
327 if (HFI1_CAP_IS_USET(PKEY_CHECK))
328 ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
329 else
330 ret = -EPERM;
331 break;
332 case HFI1_CMD_CTXT_RESET: {
333 struct send_context *sc;
334 struct hfi1_devdata *dd;
335
336 if (!uctxt || !uctxt->dd || !uctxt->sc) {
337 ret = -EINVAL;
338 break;
339 }
340 /*
341 * There is no protection here. User level has to
342 * guarantee that no one will be writing to the send
343 * context while it is being re-initialized.
344 * If user level breaks that guarantee, it will break
345 * its own context and no one else's.
346 */
347 dd = uctxt->dd;
348 sc = uctxt->sc;
349 /*
350 * Wait until the interrupt handler has marked the
351 * context as halted or frozen. Report error if we time
352 * out.
353 */
354 wait_event_interruptible_timeout(
355 sc->halt_wait, (sc->flags & SCF_HALTED),
356 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
357 if (!(sc->flags & SCF_HALTED)) {
358 ret = -ENOLCK;
359 break;
360 }
361 /*
362 * If the send context was halted due to a Freeze,
363 * wait until the device has been "unfrozen" before
364 * resetting the context.
365 */
366 if (sc->flags & SCF_FROZEN) {
367 wait_event_interruptible_timeout(
368 dd->event_queue,
369 !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
370 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
371 if (dd->flags & HFI1_FROZEN) {
372 ret = -ENOLCK;
373 break;
374 }
375 if (dd->flags & HFI1_FORCED_FREEZE) {
376 /* Don't allow context reset if we are in a
377 * forced freeze */
378 ret = -ENODEV;
379 break;
380 }
381 sc_disable(sc);
382 ret = sc_enable(sc);
383 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
384 uctxt->ctxt);
385 } else
386 ret = sc_restart(sc);
387 if (!ret)
388 sc_return_credits(sc);
389 break;
390 }
391 case HFI1_CMD_EP_INFO:
392 case HFI1_CMD_EP_ERASE_CHIP:
393 case HFI1_CMD_EP_ERASE_RANGE:
394 case HFI1_CMD_EP_READ_RANGE:
395 case HFI1_CMD_EP_WRITE_RANGE:
396 ret = handle_eprom_command(&cmd);
397 break;
398 }
399
400 if (ret >= 0)
401 ret = consumed;
402 bail:
403 return ret;
404 }
405
406 static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
407 {
408 struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
409 struct hfi1_user_sdma_pkt_q *pq = fd->pq;
410 struct hfi1_user_sdma_comp_q *cq = fd->cq;
411 int ret = 0, done = 0, reqs = 0;
412 unsigned long dim = from->nr_segs;
413
414 if (!cq || !pq) {
415 ret = -EIO;
416 goto done;
417 }
418
419 if (!iter_is_iovec(from) || !dim) {
420 ret = -EINVAL;
421 goto done;
422 }
423
424 hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
425 fd->uctxt->ctxt, fd->subctxt, dim);
426
427 if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
428 ret = -ENOSPC;
429 goto done;
430 }
431
432 while (dim) {
433 unsigned long count = 0;
434
435 ret = hfi1_user_sdma_process_request(
436 kiocb->ki_filp, (struct iovec *)(from->iov + done),
437 dim, &count);
438 if (ret)
439 goto done;
440 dim -= count;
441 done += count;
442 reqs++;
443 }
444 done:
445 return ret ? ret : reqs;
446 }
447
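/*
 * Illustrative userspace use of the mmap interface below (a sketch,
 * not part of the original source): the tokens handed out by
 * get_base_info() in struct hfi1_base_info are passed back as the
 * mmap() offset, e.g.
 *
 *   void *uregs = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, binfo.user_regbase);
 *
 * hfi1_file_mmap() recovers the token from vma->vm_pgoff and decodes
 * the type/ctxt/subctxt fields from it.
 */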
448 static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
449 {
450 struct hfi1_filedata *fd = fp->private_data;
451 struct hfi1_ctxtdata *uctxt = fd->uctxt;
452 struct hfi1_devdata *dd;
453 unsigned long flags, pfn;
454 u64 token = vma->vm_pgoff << PAGE_SHIFT,
455 memaddr = 0;
456 u8 subctxt, mapio = 0, vmf = 0, type;
457 ssize_t memlen = 0;
458 int ret = 0;
459 u16 ctxt;
460
461 if (!is_valid_mmap(token) || !uctxt ||
462 !(vma->vm_flags & VM_SHARED)) {
463 ret = -EINVAL;
464 goto done;
465 }
466 dd = uctxt->dd;
467 ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
468 subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
469 type = HFI1_MMAP_TOKEN_GET(TYPE, token);
470 if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
471 ret = -EINVAL;
472 goto done;
473 }
474
475 flags = vma->vm_flags;
476
477 switch (type) {
478 case PIO_BUFS:
479 case PIO_BUFS_SOP:
480 memaddr = ((dd->physaddr + TXE_PIO_SEND) +
481 /* chip pio base */
482 (uctxt->sc->hw_context * BIT(16))) +
483 /* 64K PIO space / ctxt */
484 (type == PIO_BUFS_SOP ?
485 (TXE_PIO_SIZE / 2) : 0); /* sop? */
486 /*
487 * Map only the amount allocated to the context, not the
488 * context's entire available PIO space.
489 */
490 memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
491 PAGE_SIZE);
492 flags &= ~VM_MAYREAD;
493 flags |= VM_DONTCOPY | VM_DONTEXPAND;
494 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
495 mapio = 1;
496 break;
497 case PIO_CRED:
498 if (flags & VM_WRITE) {
499 ret = -EPERM;
500 goto done;
501 }
502 /*
503 * The credit return location for this context could be on the
504 * second or third page allocated for credit returns (if the
505 * number of enabled contexts is > 64 or > 128, respectively).
506 */
507 memaddr = dd->cr_base[uctxt->numa_id].pa +
508 (((u64)uctxt->sc->hw_free -
509 (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
510 memlen = PAGE_SIZE;
511 flags &= ~VM_MAYWRITE;
512 flags |= VM_DONTCOPY | VM_DONTEXPAND;
513 /*
514 * The driver has already allocated memory for credit
515 * returns and programmed it into the chip. Has that
516 * memory been flagged as non-cached?
517 */
518 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
519 mapio = 1;
520 break;
521 case RCV_HDRQ:
522 memaddr = uctxt->rcvhdrq_phys;
523 memlen = uctxt->rcvhdrq_size;
524 break;
525 case RCV_EGRBUF: {
526 unsigned long addr;
527 int i;
528 /*
529 * The RcvEgr buffers need to be handled differently
530 * as multiple non-contiguous pages need to be mapped
531 * into the user process.
532 */
533 memlen = uctxt->egrbufs.size;
534 if ((vma->vm_end - vma->vm_start) != memlen) {
535 dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
536 (vma->vm_end - vma->vm_start), memlen);
537 ret = -EINVAL;
538 goto done;
539 }
540 if (vma->vm_flags & VM_WRITE) {
541 ret = -EPERM;
542 goto done;
543 }
544 vma->vm_flags &= ~VM_MAYWRITE;
545 addr = vma->vm_start;
546 for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
547 ret = remap_pfn_range(
548 vma, addr,
549 uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
550 uctxt->egrbufs.buffers[i].len,
551 vma->vm_page_prot);
552 if (ret < 0)
553 goto done;
554 addr += uctxt->egrbufs.buffers[i].len;
555 }
556 ret = 0;
557 goto done;
558 }
559 case UREGS:
560 /*
561 * Map only the page that contains this context's user
562 * registers.
563 */
564 memaddr = (unsigned long)
565 (dd->physaddr + RXE_PER_CONTEXT_USER)
566 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
567 /*
568 * TidFlow table is on the same page as the rest of the
569 * user registers.
570 */
571 memlen = PAGE_SIZE;
572 flags |= VM_DONTCOPY | VM_DONTEXPAND;
573 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
574 mapio = 1;
575 break;
576 case EVENTS:
577 /*
578 * Use the page where this context's flags are. User level
579 * knows where its own bitmap is within the page.
580 */
581 memaddr = (unsigned long)(dd->events +
582 ((uctxt->ctxt - dd->first_user_ctxt) *
583 HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
584 memlen = PAGE_SIZE;
585 /*
586 * v3.7 removes VM_RESERVED but the effect is kept by
587 * using VM_IO.
588 */
589 flags |= VM_IO | VM_DONTEXPAND;
590 vmf = 1;
591 break;
592 case STATUS:
593 memaddr = kvirt_to_phys((void *)dd->status);
594 memlen = PAGE_SIZE;
595 flags |= VM_IO | VM_DONTEXPAND;
596 break;
597 case RTAIL:
598 if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
599 /*
600 * If the memory allocation failed, the context alloc
601 * also would have failed, so we would never get here
602 */
603 ret = -EINVAL;
604 goto done;
605 }
606 if (flags & VM_WRITE) {
607 ret = -EPERM;
608 goto done;
609 }
610 memaddr = uctxt->rcvhdrqtailaddr_phys;
611 memlen = PAGE_SIZE;
612 flags &= ~VM_MAYWRITE;
613 break;
614 case SUBCTXT_UREGS:
615 memaddr = (u64)uctxt->subctxt_uregbase;
616 memlen = PAGE_SIZE;
617 flags |= VM_IO | VM_DONTEXPAND;
618 vmf = 1;
619 break;
620 case SUBCTXT_RCV_HDRQ:
621 memaddr = (u64)uctxt->subctxt_rcvhdr_base;
622 memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
623 flags |= VM_IO | VM_DONTEXPAND;
624 vmf = 1;
625 break;
626 case SUBCTXT_EGRBUF:
627 memaddr = (u64)uctxt->subctxt_rcvegrbuf;
628 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
629 flags |= VM_IO | VM_DONTEXPAND;
630 flags &= ~VM_MAYWRITE;
631 vmf = 1;
632 break;
633 case SDMA_COMP: {
634 struct hfi1_user_sdma_comp_q *cq = fd->cq;
635
636 if (!cq) {
637 ret = -EFAULT;
638 goto done;
639 }
640 memaddr = (u64)cq->comps;
641 memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
642 flags |= VM_IO | VM_DONTEXPAND;
643 vmf = 1;
644 break;
645 }
646 default:
647 ret = -EINVAL;
648 break;
649 }
650
651 if ((vma->vm_end - vma->vm_start) != memlen) {
652 hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
653 uctxt->ctxt, fd->subctxt,
654 (vma->vm_end - vma->vm_start), memlen);
655 ret = -EINVAL;
656 goto done;
657 }
658
659 vma->vm_flags = flags;
660 hfi1_cdbg(PROC,
661 "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
662 ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
663 vma->vm_end - vma->vm_start, vma->vm_flags);
664 pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
665 if (vmf) {
666 vma->vm_pgoff = pfn;
667 vma->vm_ops = &vm_ops;
668 ret = 0;
669 } else if (mapio) {
670 ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
671 vma->vm_page_prot);
672 } else {
673 ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
674 vma->vm_page_prot);
675 }
676 done:
677 return ret;
678 }
679
680 /*
681 * Local (non-chip) user memory is not mapped right away but only
682 * as it is accessed by the user-level code.
683 */
684 static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
685 {
686 struct page *page;
687
688 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
689 if (!page)
690 return VM_FAULT_SIGBUS;
691
692 get_page(page);
693 vmf->page = page;
694
695 return 0;
696 }
697
698 static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
699 {
700 struct hfi1_ctxtdata *uctxt;
701 unsigned pollflag;
702
703 uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
704 if (!uctxt)
705 pollflag = POLLERR;
706 else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
707 pollflag = poll_urgent(fp, pt);
708 else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
709 pollflag = poll_next(fp, pt);
710 else /* invalid */
711 pollflag = POLLERR;
712
713 return pollflag;
714 }
715
716 static int hfi1_file_close(struct inode *inode, struct file *fp)
717 {
718 struct hfi1_filedata *fdata = fp->private_data;
719 struct hfi1_ctxtdata *uctxt = fdata->uctxt;
720 struct hfi1_devdata *dd;
721 unsigned long flags, *ev;
722
723 fp->private_data = NULL;
724
725 if (!uctxt)
726 goto done;
727
728 hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
729 dd = uctxt->dd;
730 mutex_lock(&hfi1_mutex);
731
732 flush_wc();
733 /* drain user sdma queue */
734 hfi1_user_sdma_free_queues(fdata);
735
736 /*
737 * Clear any left over, unhandled events so the next process that
738 * gets this context doesn't get confused.
739 */
740 ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
741 HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
742 *ev = 0;
743
744 if (--uctxt->cnt) {
745 uctxt->active_slaves &= ~(1 << fdata->subctxt);
746 uctxt->subpid[fdata->subctxt] = 0;
747 mutex_unlock(&hfi1_mutex);
748 goto done;
749 }
750
751 spin_lock_irqsave(&dd->uctxt_lock, flags);
752 /*
753 * Disable receive context and interrupt available, reset all
754 * RcvCtxtCtrl bits to default values.
755 */
756 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
757 HFI1_RCVCTRL_TIDFLOW_DIS |
758 HFI1_RCVCTRL_INTRAVAIL_DIS |
759 HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
760 HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
761 HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
762 /* Clear the context's J_KEY */
763 hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
764 /*
765 * Reset context integrity checks to default.
766 * (writes to CSRs probably belong in chip.c)
767 */
768 write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
769 hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
770 sc_disable(uctxt->sc);
771 uctxt->pid = 0;
772 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
773
774 dd->rcd[uctxt->ctxt] = NULL;
775 uctxt->rcvwait_to = 0;
776 uctxt->piowait_to = 0;
777 uctxt->rcvnowait = 0;
778 uctxt->pionowait = 0;
779 uctxt->event_flags = 0;
780
781 hfi1_clear_tids(uctxt);
782 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
783
784 if (uctxt->tid_pg_list)
785 unlock_exp_tids(uctxt);
786
787 hfi1_stats.sps_ctxts--;
788 dd->freectxts++;
789 mutex_unlock(&hfi1_mutex);
790 hfi1_free_ctxtdata(dd, uctxt);
791 done:
792 kfree(fdata);
793 return 0;
794 }
795
796 /*
797 * Convert kernel *virtual* addresses to physical addresses.
798 * This is used for vmalloc'ed addresses.
799 */
800 static u64 kvirt_to_phys(void *addr)
801 {
802 struct page *page;
803 u64 paddr = 0;
804
805 page = vmalloc_to_page(addr);
806 if (page)
807 paddr = page_to_pfn(page) << PAGE_SHIFT;
808
809 return paddr;
810 }
811
812 static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
813 {
814 int i_minor, ret = 0;
815 unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
816
817 swmajor = uinfo->userversion >> 16;
818 if (swmajor != HFI1_USER_SWMAJOR) {
819 ret = -ENODEV;
820 goto done;
821 }
822
823 swminor = uinfo->userversion & 0xffff;
824
825 if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
826 alg = uinfo->hfi1_alg;
827
828 mutex_lock(&hfi1_mutex);
829 /* First, let's check if we need to set up a shared context */
830 if (uinfo->subctxt_cnt)
831 ret = find_shared_ctxt(fp, uinfo);
832
833 /*
834 * We execute the following block if we couldn't find a
835 * shared context or if context sharing is not required.
836 */
837 if (!ret) {
838 i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
839 ret = get_user_context(fp, uinfo, i_minor - 1, alg);
840 }
841 mutex_unlock(&hfi1_mutex);
842 done:
843 return ret;
844 }
845
846 /* return true if the device is available for general use */
847 static int usable_device(struct hfi1_devdata *dd)
848 {
849 struct hfi1_pportdata *ppd = dd->pport;
850
851 return driver_lstate(ppd) == IB_PORT_ACTIVE;
852 }
853
854 static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
855 int devno, unsigned alg)
856 {
857 struct hfi1_devdata *dd = NULL;
858 int ret = 0, devmax, npresent, nup, dev;
859
860 devmax = hfi1_count_units(&npresent, &nup);
861 if (!npresent) {
862 ret = -ENXIO;
863 goto done;
864 }
865 if (!nup) {
866 ret = -ENETDOWN;
867 goto done;
868 }
869 if (devno >= 0) {
870 dd = hfi1_lookup(devno);
871 if (!dd)
872 ret = -ENODEV;
873 else if (!dd->freectxts)
874 ret = -EBUSY;
875 } else {
876 struct hfi1_devdata *pdd;
877
878 if (alg == HFI1_ALG_ACROSS) {
879 unsigned free = 0U;
880
881 for (dev = 0; dev < devmax; dev++) {
882 pdd = hfi1_lookup(dev);
883 if (!pdd)
884 continue;
885 if (!usable_device(pdd))
886 continue;
887 if (pdd->freectxts &&
888 pdd->freectxts > free) {
889 dd = pdd;
890 free = pdd->freectxts;
891 }
892 }
893 } else {
894 for (dev = 0; dev < devmax; dev++) {
895 pdd = hfi1_lookup(dev);
896 if (!pdd)
897 continue;
898 if (!usable_device(pdd))
899 continue;
900 if (pdd->freectxts) {
901 dd = pdd;
902 break;
903 }
904 }
905 }
906 if (!dd)
907 ret = -EBUSY;
908 }
909 done:
910 return ret ? ret : allocate_ctxt(fp, dd, uinfo);
911 }
912
913 static int find_shared_ctxt(struct file *fp,
914 const struct hfi1_user_info *uinfo)
915 {
916 int devmax, ndev, i;
917 int ret = 0;
918 struct hfi1_filedata *fd = fp->private_data;
919
920 devmax = hfi1_count_units(NULL, NULL);
921
922 for (ndev = 0; ndev < devmax; ndev++) {
923 struct hfi1_devdata *dd = hfi1_lookup(ndev);
924
925 if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
926 continue;
927 for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
928 struct hfi1_ctxtdata *uctxt = dd->rcd[i];
929
930 /* Skip ctxts which are not yet open */
931 if (!uctxt || !uctxt->cnt)
932 continue;
933 /* Skip ctxt if it doesn't match the requested one */
934 if (memcmp(uctxt->uuid, uinfo->uuid,
935 sizeof(uctxt->uuid)) ||
936 uctxt->jkey != generate_jkey(current_uid()) ||
937 uctxt->subctxt_id != uinfo->subctxt_id ||
938 uctxt->subctxt_cnt != uinfo->subctxt_cnt)
939 continue;
940
941 /* Verify the sharing process matches the master */
942 if (uctxt->userversion != uinfo->userversion ||
943 uctxt->cnt >= uctxt->subctxt_cnt) {
944 ret = -EINVAL;
945 goto done;
946 }
947 fd->uctxt = uctxt;
948 fd->subctxt = uctxt->cnt++;
949 uctxt->subpid[fd->subctxt] = current->pid;
950 uctxt->active_slaves |= 1 << fd->subctxt;
951 ret = 1;
952 goto done;
953 }
954 }
955
956 done:
957 return ret;
958 }
959
960 static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
961 struct hfi1_user_info *uinfo)
962 {
963 struct hfi1_filedata *fd = fp->private_data;
964 struct hfi1_ctxtdata *uctxt;
965 unsigned ctxt;
966 int ret;
967
968 if (dd->flags & HFI1_FROZEN) {
969 /*
970 * Pick an error that is distinct from all other errors
971 * that are returned so the user process knows that
972 * it tried to allocate while the SPC was frozen. It
973 * should be able to retry with success in a short
974 * while.
975 */
976 return -EIO;
977 }
978
979 for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
980 if (!dd->rcd[ctxt])
981 break;
982
983 if (ctxt == dd->num_rcv_contexts)
984 return -EBUSY;
985
986 uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
987 if (!uctxt) {
988 dd_dev_err(dd,
989 "Unable to allocate ctxtdata memory, failing open\n");
990 return -ENOMEM;
991 }
992 /*
993 * Allocate and enable a PIO send context.
994 */
995 uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
996 uctxt->numa_id);
997 if (!uctxt->sc)
998 return -ENOMEM;
999
1000 hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
1001 uctxt->sc->hw_context);
1002 ret = sc_enable(uctxt->sc);
1003 if (ret)
1004 return ret;
1005 /*
1006 * Setup shared context resources if the user-level has requested
1007 * shared contexts and this is the 'master' process.
1008 * This has to be done here so the rest of the sub-contexts find the
1009 * proper master.
1010 */
1011 if (uinfo->subctxt_cnt && !fd->subctxt) {
1012 ret = init_subctxts(uctxt, uinfo);
1013 /*
1014 * On error, we don't need to disable and de-allocate the
1015 * send context because it will be done during file close
1016 */
1017 if (ret)
1018 return ret;
1019 }
1020 uctxt->userversion = uinfo->userversion;
1021 uctxt->pid = current->pid;
1022 uctxt->flags = HFI1_CAP_UGET(MASK);
1023 init_waitqueue_head(&uctxt->wait);
1024 strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
1025 memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
1026 uctxt->jkey = generate_jkey(current_uid());
1027 INIT_LIST_HEAD(&uctxt->sdma_queues);
1028 spin_lock_init(&uctxt->sdma_qlock);
1029 hfi1_stats.sps_ctxts++;
1030 dd->freectxts--;
1031 fd->uctxt = uctxt;
1032
1033 return 0;
1034 }
1035
1036 static int init_subctxts(struct hfi1_ctxtdata *uctxt,
1037 const struct hfi1_user_info *uinfo)
1038 {
1039 int ret = 0;
1040 unsigned num_subctxts;
1041
1042 num_subctxts = uinfo->subctxt_cnt;
1043 if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
1044 ret = -EINVAL;
1045 goto bail;
1046 }
1047
1048 uctxt->subctxt_cnt = uinfo->subctxt_cnt;
1049 uctxt->subctxt_id = uinfo->subctxt_id;
1050 uctxt->active_slaves = 1;
1051 uctxt->redirect_seq_cnt = 1;
1052 set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1053 bail:
1054 return ret;
1055 }
1056
1057 static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
1058 {
1059 int ret = 0;
1060 unsigned num_subctxts = uctxt->subctxt_cnt;
1061
1062 uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
1063 if (!uctxt->subctxt_uregbase) {
1064 ret = -ENOMEM;
1065 goto bail;
1066 }
1067 /* We can take the size of the RcvHdr Queue from the master */
1068 uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
1069 num_subctxts);
1070 if (!uctxt->subctxt_rcvhdr_base) {
1071 ret = -ENOMEM;
1072 goto bail_ureg;
1073 }
1074
1075 uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
1076 num_subctxts);
1077 if (!uctxt->subctxt_rcvegrbuf) {
1078 ret = -ENOMEM;
1079 goto bail_rhdr;
1080 }
1081 goto bail;
1082 bail_rhdr:
1083 vfree(uctxt->subctxt_rcvhdr_base);
1084 bail_ureg:
1085 vfree(uctxt->subctxt_uregbase);
1086 uctxt->subctxt_uregbase = NULL;
1087 bail:
1088 return ret;
1089 }
1090
1091 static int user_init(struct file *fp)
1092 {
1093 int ret;
1094 unsigned int rcvctrl_ops = 0;
1095 struct hfi1_filedata *fd = fp->private_data;
1096 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1097
1098 /* make sure that the context has already been set up */
1099 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
1100 ret = -EFAULT;
1101 goto done;
1102 }
1103
1104 /*
1105 * Subctxts don't need to initialize anything since the master
1106 * has done it.
1107 */
1108 if (fd->subctxt) {
1109 ret = wait_event_interruptible(uctxt->wait,
1110 !test_bit(HFI1_CTXT_MASTER_UNINIT,
1111 &uctxt->event_flags));
1112 goto done;
1113 }
1114
1115 /* initialize poll variables... */
1116 uctxt->urgent = 0;
1117 uctxt->urgent_poll = 0;
1118
1119 /*
1120 * Now enable the ctxt for receive.
1121 * For chips that are set to DMA the tail register to memory
1122 * when it changes (and when the update bit transitions from
1123 * 0 to 1), we turn it off and then back on.
1124 * This will (very briefly) affect any other open ctxts, but the
1125 * duration is very short, and therefore isn't an issue. We
1126 * explicitly set the in-memory tail copy to 0 beforehand, so we
1127 * don't have to wait to be sure the DMA update has happened
1128 * (chip resets head/tail to 0 on transition to enable).
1129 */
1130 if (uctxt->rcvhdrtail_kvaddr)
1131 clear_rcvhdrtail(uctxt);
1132
1133 /* Setup J_KEY before enabling the context */
1134 hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
1135
1136 rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
1137 if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
1138 rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
1139 /*
1140 * Ignore the bit in the flags for now until proper
1141 * support for multiple packet per rcv array entry is
1142 * added.
1143 */
1144 if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
1145 rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
1146 if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
1147 rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
1148 if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
1149 rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
1150 if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
1151 rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
1152 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
1153
1154 /* Notify any waiting slaves */
1155 if (uctxt->subctxt_cnt) {
1156 clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1157 wake_up(&uctxt->wait);
1158 }
1159 ret = 0;
1160
1161 done:
1162 return ret;
1163 }
1164
1165 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
1166 {
1167 struct hfi1_ctxt_info cinfo;
1168 struct hfi1_filedata *fd = fp->private_data;
1169 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1170 int ret = 0;
1171
1172 memset(&cinfo, 0, sizeof(cinfo));
1173 ret = hfi1_get_base_kinfo(uctxt, &cinfo);
1174 if (ret < 0)
1175 goto done;
1176 cinfo.num_active = hfi1_count_active_units();
1177 cinfo.unit = uctxt->dd->unit;
1178 cinfo.ctxt = uctxt->ctxt;
1179 cinfo.subctxt = fd->subctxt;
1180 cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1181 uctxt->dd->rcv_entries.group_size) +
1182 uctxt->expected_count;
1183 cinfo.credits = uctxt->sc->credits;
1184 cinfo.numa_node = uctxt->numa_id;
1185 cinfo.rec_cpu = fd->rec_cpu_num;
1186 cinfo.send_ctxt = uctxt->sc->hw_context;
1187
1188 cinfo.egrtids = uctxt->egrbufs.alloced;
1189 cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
1190 cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
1191 cinfo.sdma_ring_size = fd->cq->nentries;
1192 cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1193
1194 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
1195 if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
1196 ret = -EFAULT;
1197 done:
1198 return ret;
1199 }
1200
1201 static int setup_ctxt(struct file *fp)
1202 {
1203 struct hfi1_filedata *fd = fp->private_data;
1204 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1205 struct hfi1_devdata *dd = uctxt->dd;
1206 int ret = 0;
1207
1208 /*
1209 * Context should be set up only once (including allocation and
1210 * programming of eager buffers). This is done if context sharing
1211 * is not requested or by the master process.
1212 */
1213 if (!uctxt->subctxt_cnt || !fd->subctxt) {
1214 ret = hfi1_init_ctxt(uctxt->sc);
1215 if (ret)
1216 goto done;
1217
1218 /* Now allocate the RcvHdr queue and eager buffers. */
1219 ret = hfi1_create_rcvhdrq(dd, uctxt);
1220 if (ret)
1221 goto done;
1222 ret = hfi1_setup_eagerbufs(uctxt);
1223 if (ret)
1224 goto done;
1225 if (uctxt->subctxt_cnt && !fd->subctxt) {
1226 ret = setup_subctxt(uctxt);
1227 if (ret)
1228 goto done;
1229 }
1230 /* Setup Expected Rcv memories */
1231 uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
1232 sizeof(struct page **));
1233 if (!uctxt->tid_pg_list) {
1234 ret = -ENOMEM;
1235 goto done;
1236 }
1237 uctxt->physshadow = vzalloc(uctxt->expected_count *
1238 sizeof(*uctxt->physshadow));
1239 if (!uctxt->physshadow) {
1240 ret = -ENOMEM;
1241 goto done;
1242 }
1243 /* allocate expected TID map and initialize the cursor */
1244 atomic_set(&uctxt->tidcursor, 0);
1245 uctxt->numtidgroups = uctxt->expected_count /
1246 dd->rcv_entries.group_size;
1247 uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
1248 !!(uctxt->numtidgroups % BITS_PER_LONG);
1249 uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
1250 sizeof(*uctxt->tidusemap),
1251 GFP_KERNEL, uctxt->numa_id);
1252 if (!uctxt->tidusemap) {
1253 ret = -ENOMEM;
1254 goto done;
1255 }
1256 /*
1257 * In case the number of groups is not a multiple of
1258 * 64 (the number of groups in a tidusemap element), mark
1259 * the extra ones as used. This will effectively make them
1260 * permanently used and should never be assigned. Otherwise,
1261 * the code which checks how many free groups we have will
1262 * get completely confused about the state of the bits.
1263 */
1264 if (uctxt->numtidgroups % BITS_PER_LONG)
1265 uctxt->tidusemap[uctxt->tidmapcnt - 1] =
1266 ~((1ULL << (uctxt->numtidgroups %
1267 BITS_PER_LONG)) - 1);
1268 trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0,
1269 uctxt->tidusemap, uctxt->tidmapcnt);
1270 }
1271 ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
1272 if (ret)
1273 goto done;
1274
1275 set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
1276 done:
1277 return ret;
1278 }
1279
1280 static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
1281 {
1282 struct hfi1_base_info binfo;
1283 struct hfi1_filedata *fd = fp->private_data;
1284 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1285 struct hfi1_devdata *dd = uctxt->dd;
1286 ssize_t sz;
1287 unsigned offset;
1288 int ret = 0;
1289
1290 trace_hfi1_uctxtdata(uctxt->dd, uctxt);
1291
1292 memset(&binfo, 0, sizeof(binfo));
1293 binfo.hw_version = dd->revision;
1294 binfo.sw_version = HFI1_KERN_SWVERSION;
1295 binfo.bthqp = kdeth_qp;
1296 binfo.jkey = uctxt->jkey;
1297 /*
1298 * If more than 64 contexts are enabled the allocated credit
1299 * return will span two or three contiguous pages. Since we only
1300 * map the page containing the context's credit return address,
1301 * we need to calculate the offset in the proper page.
1302 */
1303 offset = ((u64)uctxt->sc->hw_free -
1304 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
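/*
 * Illustrative example (not in the original source, assuming 4 KiB
 * pages): if uctxt->sc->hw_free sits 0x1040 bytes past cr_base[].va,
 * the PIO_CRED case of hfi1_file_mmap() maps the credit return page
 * at +0x1000 and the offset reported here is 0x40.
 */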
1305 binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
1306 fd->subctxt, offset);
1307 binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
1308 fd->subctxt,
1309 uctxt->sc->base_addr);
1310 binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
1311 uctxt->ctxt,
1312 fd->subctxt,
1313 uctxt->sc->base_addr);
1314 binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
1315 fd->subctxt,
1316 uctxt->rcvhdrq);
1317 binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
1318 fd->subctxt,
1319 uctxt->egrbufs.rcvtids[0].phys);
1320 binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
1321 fd->subctxt, 0);
1322 /*
1323 * user regs are at
1324 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
1325 */
1326 binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
1327 fd->subctxt, 0);
1328 offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
1329 HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
1330 sizeof(*dd->events));
1331 binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
1332 fd->subctxt,
1333 offset);
1334 binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
1335 fd->subctxt,
1336 dd->status);
1337 if (HFI1_CAP_IS_USET(DMA_RTAIL))
1338 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
1339 fd->subctxt, 0);
1340 if (uctxt->subctxt_cnt) {
1341 binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
1342 uctxt->ctxt,
1343 fd->subctxt, 0);
1344 binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
1345 uctxt->ctxt,
1346 fd->subctxt, 0);
1347 binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
1348 uctxt->ctxt,
1349 fd->subctxt, 0);
1350 }
1351 sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
1352 if (copy_to_user(ubase, &binfo, sz))
1353 ret = -EFAULT;
1354 return ret;
1355 }
1356
1357 static unsigned int poll_urgent(struct file *fp,
1358 struct poll_table_struct *pt)
1359 {
1360 struct hfi1_filedata *fd = fp->private_data;
1361 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1362 struct hfi1_devdata *dd = uctxt->dd;
1363 unsigned pollflag;
1364
1365 poll_wait(fp, &uctxt->wait, pt);
1366
1367 spin_lock_irq(&dd->uctxt_lock);
1368 if (uctxt->urgent != uctxt->urgent_poll) {
1369 pollflag = POLLIN | POLLRDNORM;
1370 uctxt->urgent_poll = uctxt->urgent;
1371 } else {
1372 pollflag = 0;
1373 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1374 }
1375 spin_unlock_irq(&dd->uctxt_lock);
1376
1377 return pollflag;
1378 }
1379
1380 static unsigned int poll_next(struct file *fp,
1381 struct poll_table_struct *pt)
1382 {
1383 struct hfi1_filedata *fd = fp->private_data;
1384 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1385 struct hfi1_devdata *dd = uctxt->dd;
1386 unsigned pollflag;
1387
1388 poll_wait(fp, &uctxt->wait, pt);
1389
1390 spin_lock_irq(&dd->uctxt_lock);
1391 if (hdrqempty(uctxt)) {
1392 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
1393 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
1394 pollflag = 0;
1395 } else
1396 pollflag = POLLIN | POLLRDNORM;
1397 spin_unlock_irq(&dd->uctxt_lock);
1398
1399 return pollflag;
1400 }
1401
1402 /*
1403 * Find all user contexts in use, and set the specified bit in their
1404 * event mask.
1405 * See also find_ctxt() for a similar use, that is specific to send buffers.
1406 */
1407 int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
1408 {
1409 struct hfi1_ctxtdata *uctxt;
1410 struct hfi1_devdata *dd = ppd->dd;
1411 unsigned ctxt;
1412 int ret = 0;
1413 unsigned long flags;
1414
1415 if (!dd->events) {
1416 ret = -EINVAL;
1417 goto done;
1418 }
1419
1420 spin_lock_irqsave(&dd->uctxt_lock, flags);
1421 for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
1422 ctxt++) {
1423 uctxt = dd->rcd[ctxt];
1424 if (uctxt) {
1425 unsigned long *evs = dd->events +
1426 (uctxt->ctxt - dd->first_user_ctxt) *
1427 HFI1_MAX_SHARED_CTXTS;
1428 int i;
1429 /*
1430 * subctxt_cnt is 0 if not shared, so do base
1431 * separately, first, then remaining subctxt, if any
1432 */
1433 set_bit(evtbit, evs);
1434 for (i = 1; i < uctxt->subctxt_cnt; i++)
1435 set_bit(evtbit, evs + i);
1436 }
1437 }
1438 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1439 done:
1440 return ret;
1441 }
1442
1443 /**
1444 * manage_rcvq - manage a context's receive queue
1445 * @uctxt: the context
1446 * @subctxt: the sub-context
1447 * @start_stop: action to carry out
1448 *
1449 * start_stop == 0 disables receive on the context, for use in queue
1450 * overflow conditions. start_stop==1 re-enables, to be used to
1451 * re-init the software copy of the head register
1452 */
1453 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1454 int start_stop)
1455 {
1456 struct hfi1_devdata *dd = uctxt->dd;
1457 unsigned int rcvctrl_op;
1458
1459 if (subctxt)
1460 goto bail;
1461 /* atomically clear receive enable ctxt. */
1462 if (start_stop) {
1463 /*
1464 * On enable, force in-memory copy of the tail register to
1465 * 0, so that protocol code doesn't have to worry about
1466 * whether or not the chip has yet updated the in-memory
1467 * copy or not on return from the system call. The chip
1468 * always resets its tail register back to 0 on a
1469 * transition from disabled to enabled.
1470 */
1471 if (uctxt->rcvhdrtail_kvaddr)
1472 clear_rcvhdrtail(uctxt);
1473 rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
1474 } else
1475 rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
1476 hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
1477 /* always; new head should be equal to new tail; see above */
1478 bail:
1479 return 0;
1480 }
1481
1482 /*
1483 * Clear the event notifier events for this context.
1484 * The user process then performs actions appropriate to the bits
1485 * having been set, if desired, and checks again in the future.
1486 */
1487 static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
1488 unsigned long events)
1489 {
1490 int i;
1491 struct hfi1_devdata *dd = uctxt->dd;
1492 unsigned long *evs;
1493
1494 if (!dd->events)
1495 return 0;
1496
1497 evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
1498 HFI1_MAX_SHARED_CTXTS) + subctxt;
1499
1500 for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
1501 if (!test_bit(i, &events))
1502 continue;
1503 clear_bit(i, evs);
1504 }
1505 return 0;
1506 }
1507
1508 #define num_user_pages(vaddr, len) \
1509 (1 + (((((unsigned long)(vaddr) + \
1510 (unsigned long)(len) - 1) & PAGE_MASK) - \
1511 ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
1512
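/*
 * Worked example for num_user_pages() (not in the original source,
 * assuming 4 KiB pages): vaddr = 0x1008, len = 0x2000 touches the
 * pages at 0x1000, 0x2000 and 0x3000, and the macro yields
 * 1 + ((0x3000 - 0x1000) >> PAGE_SHIFT) = 3.
 */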
1513 /**
1514 * tzcnt - count the number of trailing zeros in a 64bit value
1515 * @value: the value to be examined
1516 *
1517 * Returns the number of trailing least significant zeros in the
1518 * input value. If the value is zero, return the number of
1519 * bits of the value.
1520 */
1521 static inline u8 tzcnt(u64 value)
1522 {
1523 return value ? __builtin_ctzl(value) : sizeof(value) * 8;
1524 }
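/*
 * Examples (not in the original source): tzcnt(0x8) == 3,
 * tzcnt(1) == 0, and tzcnt(0) == 64 since the argument is a u64.
 */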
1525
1526 static inline unsigned num_free_groups(unsigned long map, u16 *start)
1527 {
1528 unsigned free;
1529 u16 bitidx = *start;
1530
1531 if (bitidx >= BITS_PER_LONG)
1532 return 0;
1533 /* "Turn off" any bits set before our bit index */
1534 map &= ~((1ULL << bitidx) - 1);
1535 free = tzcnt(map) - bitidx;
1536 while (!free && bitidx < BITS_PER_LONG) {
1537 /* Zero out the last set bit so we look at the rest */
1538 map &= ~(1ULL << bitidx);
1539 /*
1540 * Account for the previously checked bits and advance
1541 * the bit index. We don't have to check for bitidx
1542 * getting bigger than BITS_PER_LONG here as it would
1543 * mean extra instructions that we don't need. If it
1544 * did happen, it would push free to a negative value
1545 * which will break the loop.
1546 */
1547 free = tzcnt(map) - ++bitidx;
1548 }
1549 *start = bitidx;
1550 return free;
1551 }
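/*
 * Example (not in the original source): with map = 0x7 (groups 0-2
 * in use) and *start = 0, the loop skips the three used bits and
 * returns 61 free groups, with *start updated to 3.
 */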
1552
1553 static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
1554 {
1555 int ret = 0;
1556 struct hfi1_filedata *fd = fp->private_data;
1557 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1558 struct hfi1_devdata *dd = uctxt->dd;
1559 unsigned tid, mapped = 0, npages, ngroups, exp_groups,
1560 tidpairs = uctxt->expected_count / 2;
1561 struct page **pages;
1562 unsigned long vaddr, tidmap[uctxt->tidmapcnt];
1563 dma_addr_t *phys;
1564 u32 tidlist[tidpairs], pairidx = 0, tidcursor;
1565 u16 useidx, idx, bitidx, tidcnt = 0;
1566
1567 vaddr = tinfo->vaddr;
1568
1569 if (offset_in_page(vaddr)) {
1570 ret = -EINVAL;
1571 goto bail;
1572 }
1573
1574 npages = num_user_pages(vaddr, tinfo->length);
1575 if (!npages) {
1576 ret = -EINVAL;
1577 goto bail;
1578 }
1579 if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
1580 npages * PAGE_SIZE)) {
1581 dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
1582 (void *)vaddr, npages);
1583 ret = -EFAULT;
1584 goto bail;
1585 }
1586
1587 memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
1588 memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);
1589
1590 exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
1591 /* which group set do we look at first? */
1592 tidcursor = atomic_read(&uctxt->tidcursor);
1593 useidx = (tidcursor >> 16) & 0xffff;
1594 bitidx = tidcursor & 0xffff;
1595
1596 /*
1597 * Keep going until we've mapped all pages or we've exhausted all
1598 * RcvArray entries.
1599 * This iterates over the number of tidmaps + 1
1600 * (idx <= uctxt->tidmapcnt) so the bitmap we started from is
1601 * checked one more time for any free bits before the starting
1602 * point bit.
1603 */
1604 for (mapped = 0, idx = 0;
1605 mapped < npages && idx <= uctxt->tidmapcnt;) {
1606 u64 i, offset = 0;
1607 unsigned free, pinned, pmapped = 0, bits_used;
1608 u16 grp;
1609
1610 /*
1611 * "Reserve" the needed group bits under lock so other
1612 * processes can't step in the middle of it. Once
1613 * reserved, we don't need the lock anymore since we
1614 * are guaranteed the groups.
1615 */
1616 spin_lock(&uctxt->exp_lock);
1617 if (uctxt->tidusemap[useidx] == -1ULL ||
1618 bitidx >= BITS_PER_LONG) {
1619 /* no free groups in the set, use the next */
1620 useidx = (useidx + 1) % uctxt->tidmapcnt;
1621 idx++;
1622 bitidx = 0;
1623 spin_unlock(&uctxt->exp_lock);
1624 continue;
1625 }
1626 ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
1627 !!((npages - mapped) % dd->rcv_entries.group_size);
1628
1629 /*
1630 * If we've gotten here, the current set of groups does have
1631 * one or more free groups.
1632 */
1633 free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
1634 if (!free) {
1635 /*
1636 * Despite the check above, free could still come back
1637 * as 0 because we don't check the entire bitmap but
1638 * we start from bitidx.
1639 */
1640 spin_unlock(&uctxt->exp_lock);
1641 continue;
1642 }
1643 bits_used = min(free, ngroups);
1644 tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
1645 uctxt->tidusemap[useidx] |= tidmap[useidx];
1646 spin_unlock(&uctxt->exp_lock);
1647
1648 /*
1649 * At this point, we know where in the map we have free bits,
1650 * so we can offset into the various "shadow" arrays and compute
1651 * the RcvArray entry index.
1652 */
1653 offset = ((useidx * BITS_PER_LONG) + bitidx) *
1654 dd->rcv_entries.group_size;
1655 pages = uctxt->tid_pg_list + offset;
1656 phys = uctxt->physshadow + offset;
1657 tid = uctxt->expected_base + offset;
1658
1659 /* Calculate how many pages we can pin based on free bits */
1660 pinned = min((bits_used * dd->rcv_entries.group_size),
1661 (npages - mapped));
1662 /*
1663 * Now that we know how many free RcvArray entries we have,
1664 * we can pin that many user pages.
1665 */
1666 ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
1667 pinned, pages);
1668 if (ret) {
1669 /*
1670 * We can't continue because the pages array won't be
1671 * initialized. This should never happen,
1672 * unless perhaps the user has mpin'ed the pages
1673 * themselves.
1674 */
1675 dd_dev_info(dd,
1676 "Failed to lock addr %p, %u pages: errno %d\n",
1677 (void *) vaddr, pinned, -ret);
1678 /*
1679 * Let go of the bits that we reserved since we are not
1680 * going to use them.
1681 */
1682 spin_lock(&uctxt->exp_lock);
1683 uctxt->tidusemap[useidx] &=
1684 ~(((1ULL << bits_used) - 1) << bitidx);
1685 spin_unlock(&uctxt->exp_lock);
1686 goto done;
1687 }
1688 /*
1689 * How many groups do we need based on how many pages we have
1690 * pinned?
1691 */
1692 ngroups = (pinned / dd->rcv_entries.group_size) +
1693 !!(pinned % dd->rcv_entries.group_size);
1694 /*
1695 * Keep programming RcvArray entries for all the <ngroups> free
1696 * groups.
1697 */
1698 for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
1699 unsigned j;
1700 u32 pair_size = 0, tidsize;
1701 /*
1702 * This inner loop will program an entire group or the
1703 * array of pinned pages (whichever limit is hit
1704 * first).
1705 */
1706 for (j = 0; j < dd->rcv_entries.group_size &&
1707 pmapped < pinned; j++, pmapped++, tid++) {
1708 tidsize = PAGE_SIZE;
1709 phys[pmapped] = hfi1_map_page(dd->pcidev,
1710 pages[pmapped], 0,
1711 tidsize, PCI_DMA_FROMDEVICE);
1712 trace_hfi1_exp_rcv_set(uctxt->ctxt,
1713 fd->subctxt,
1714 tid, vaddr,
1715 phys[pmapped],
1716 pages[pmapped]);
1717 /*
1718 * Each RcvArray entry is programmed with one
1719 * page's worth of memory. This will handle
1720 * the 8K MTU as well as anything smaller
1721 * due to the fact that both entries in the
1722 * RcvTidPair are programmed with a page.
1723 * PSM currently does not handle anything
1724 * bigger than 8K MTU, so should we even worry
1725 * about 10K here?
1726 */
1727 hfi1_put_tid(dd, tid, PT_EXPECTED,
1728 phys[pmapped],
1729 ilog2(tidsize >> PAGE_SHIFT) + 1);
1730 pair_size += tidsize >> PAGE_SHIFT;
1731 EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
1732 if (!(tid % 2)) {
1733 tidlist[pairidx] |=
1734 EXP_TID_SET(IDX,
1735 (tid - uctxt->expected_base)
1736 / 2);
1737 tidlist[pairidx] |=
1738 EXP_TID_SET(CTRL, 1);
1739 tidcnt++;
1740 } else {
1741 tidlist[pairidx] |=
1742 EXP_TID_SET(CTRL, 2);
1743 pair_size = 0;
1744 pairidx++;
1745 }
1746 }
1747 /*
1748 * We've programmed the entire group (or as much of the
1749 * group as we'll use). Now, it's time to push it out...
1750 */
1751 flush_wc();
1752 }
1753 mapped += pinned;
1754 atomic_set(&uctxt->tidcursor,
1755 (((useidx & 0xffffff) << 16) |
1756 ((bitidx + bits_used) & 0xffffff)));
1757 }
1758 trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0, uctxt->tidusemap,
1759 uctxt->tidmapcnt);
1760
1761 done:
1762 /* If we've mapped anything, copy relevant info to user */
1763 if (mapped) {
1764 if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
1765 tidlist, sizeof(tidlist[0]) * tidcnt)) {
1766 ret = -EFAULT;
1767 goto done;
1768 }
1769 /* copy TID info to user */
1770 if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
1771 tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
1772 ret = -EFAULT;
1773 }
1774 bail:
1775 /*
1776 * Calculate mapped length. New Exp TID protocol does not "unwind" and
1777 * report an error if it can't map the entire buffer. It just reports
1778 * the length that was mapped.
1779 */
1780 tinfo->length = mapped * PAGE_SIZE;
1781 tinfo->tidcnt = tidcnt;
1782 return ret;
1783 }
1784
1785 static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
1786 {
1787 struct hfi1_filedata *fd = fp->private_data;
1788 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1789 struct hfi1_devdata *dd = uctxt->dd;
1790 unsigned long tidmap[uctxt->tidmapcnt];
1791 struct page **pages;
1792 dma_addr_t *phys;
1793 u16 idx, bitidx, tid;
1794 int ret = 0;
1795
1796 if (copy_from_user(&tidmap, (void __user *)(unsigned long)
1797 tinfo->tidmap,
1798 sizeof(tidmap[0]) * uctxt->tidmapcnt)) {
1799 ret = -EFAULT;
1800 goto done;
1801 }
1802 for (idx = 0; idx < uctxt->tidmapcnt; idx++) {
1803 unsigned long map;
1804
1805 bitidx = 0;
1806 if (!tidmap[idx])
1807 continue;
1808 map = tidmap[idx];
1809 while ((bitidx = tzcnt(map)) < BITS_PER_LONG) {
1810 int i, pcount = 0;
1811 struct page *pshadow[dd->rcv_entries.group_size];
1812 unsigned offset = ((idx * BITS_PER_LONG) + bitidx) *
1813 dd->rcv_entries.group_size;
1814
1815 pages = uctxt->tid_pg_list + offset;
1816 phys = uctxt->physshadow + offset;
1817 tid = uctxt->expected_base + offset;
1818 for (i = 0; i < dd->rcv_entries.group_size;
1819 i++, tid++) {
1820 if (pages[i]) {
1821 hfi1_put_tid(dd, tid, PT_INVALID,
1822 0, 0);
1823 trace_hfi1_exp_rcv_free(uctxt->ctxt,
1824 fd->subctxt,
1825 tid, phys[i],
1826 pages[i]);
1827 pci_unmap_page(dd->pcidev, phys[i],
1828 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1829 pshadow[pcount] = pages[i];
1830 pages[i] = NULL;
1831 pcount++;
1832 phys[i] = 0;
1833 }
1834 }
1835 flush_wc();
1836 hfi1_release_user_pages(pshadow, pcount);
1837 clear_bit(bitidx, &uctxt->tidusemap[idx]);
1838 map &= ~(1ULL<<bitidx);
1839 }
1840 }
1841 trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 1, uctxt->tidusemap,
1842 uctxt->tidmapcnt);
1843 done:
1844 return ret;
1845 }
1846
1847 static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
1848 {
1849 struct hfi1_devdata *dd = uctxt->dd;
1850 unsigned tid;
1851
1852 dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
1853 uctxt->ctxt);
1854 for (tid = 0; tid < uctxt->expected_count; tid++) {
1855 struct page *p = uctxt->tid_pg_list[tid];
1856 dma_addr_t phys;
1857
1858 if (!p)
1859 continue;
1860
1861 phys = uctxt->physshadow[tid];
1862 uctxt->physshadow[tid] = 0;
1863 uctxt->tid_pg_list[tid] = NULL;
1864 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
1865 hfi1_release_user_pages(&p, 1);
1866 }
1867 }
1868
1869 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1870 u16 pkey)
1871 {
1872 int ret = -ENOENT, i, intable = 0;
1873 struct hfi1_pportdata *ppd = uctxt->ppd;
1874 struct hfi1_devdata *dd = uctxt->dd;
1875
1876 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
1877 ret = -EINVAL;
1878 goto done;
1879 }
1880
1881 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
1882 if (pkey == ppd->pkeys[i]) {
1883 intable = 1;
1884 break;
1885 }
1886
1887 if (intable)
1888 ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
1889 done:
1890 return ret;
1891 }
1892
1893 static int ui_open(struct inode *inode, struct file *filp)
1894 {
1895 struct hfi1_devdata *dd;
1896
1897 dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
1898 filp->private_data = dd; /* for other methods */
1899 return 0;
1900 }
1901
1902 static int ui_release(struct inode *inode, struct file *filp)
1903 {
1904 /* nothing to do */
1905 return 0;
1906 }
1907
1908 static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1909 {
1910 struct hfi1_devdata *dd = filp->private_data;
1911
1912 switch (whence) {
1913 case SEEK_SET:
1914 break;
1915 case SEEK_CUR:
1916 offset += filp->f_pos;
1917 break;
1918 case SEEK_END:
1919 offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
1920 offset;
1921 break;
1922 default:
1923 return -EINVAL;
1924 }
1925
1926 if (offset < 0)
1927 return -EINVAL;
1928
1929 if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
1930 return -EINVAL;
1931
1932 filp->f_pos = offset;
1933
1934 return filp->f_pos;
1935 }
1936
1937
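/*
 * Added note (not in the original source): ui_lseek()/ui_read() expose
 * a flat address space of (dd->kregend - dd->kregbase) bytes of chip
 * CSRs followed by DC8051_DATA_MEM_SIZE bytes of 8051 data memory;
 * offsets past the BAR are serviced via read_8051_data() below.
 */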
1938 /* NOTE: assumes unsigned long is 8 bytes */
1939 static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
1940 loff_t *f_pos)
1941 {
1942 struct hfi1_devdata *dd = filp->private_data;
1943 void __iomem *base = dd->kregbase;
1944 unsigned long total, csr_off,
1945 barlen = (dd->kregend - dd->kregbase);
1946 u64 data;
1947
1948 /* only read 8 byte quantities */
1949 if ((count % 8) != 0)
1950 return -EINVAL;
1951 /* offset must be 8-byte aligned */
1952 if ((*f_pos % 8) != 0)
1953 return -EINVAL;
1954 /* destination buffer must be 8-byte aligned */
1955 if ((unsigned long)buf % 8 != 0)
1956 return -EINVAL;
1957 /* must be in range */
1958 if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
1959 return -EINVAL;
1960 /* only set the base if we are not starting past the BAR */
1961 if (*f_pos < barlen)
1962 base += *f_pos;
1963 csr_off = *f_pos;
1964 for (total = 0; total < count; total += 8, csr_off += 8) {
1965 /* accessing LCB CSRs requires more checks */
1966 if (is_lcb_offset(csr_off)) {
1967 if (read_lcb_csr(dd, csr_off, (u64 *)&data))
1968 break; /* failed */
1969 }
1970 /*
1971 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
1972 * false parity error. Avoid the whole issue by not reading
1973 * them. These registers are defined as having a read value
1974 * of 0.
1975 */
1976 else if (csr_off == ASIC_GPIO_CLEAR
1977 || csr_off == ASIC_GPIO_FORCE
1978 || csr_off == ASIC_QSFP1_CLEAR
1979 || csr_off == ASIC_QSFP1_FORCE
1980 || csr_off == ASIC_QSFP2_CLEAR
1981 || csr_off == ASIC_QSFP2_FORCE)
1982 data = 0;
1983 else if (csr_off >= barlen) {
1984 /*
1985 * read_8051_data can read more than just 8 bytes at
1986 * a time. However, folding this into the loop and
1987 * handling the reads in 8 byte increments allows us
1988 * to smoothly transition from chip memory to 8051
1989 * memory.
1990 */
1991 if (read_8051_data(dd,
1992 (u32)(csr_off - barlen),
1993 sizeof(data), &data))
1994 break; /* failed */
1995 } else
1996 data = readq(base + total);
1997 if (put_user(data, (unsigned long __user *)(buf + total)))
1998 break;
1999 }
2000 *f_pos += total;
2001 return total;
2002 }
2003
2004 /* NOTE: assumes unsigned long is 8 bytes */
2005 static ssize_t ui_write(struct file *filp, const char __user *buf,
2006 size_t count, loff_t *f_pos)
2007 {
2008 struct hfi1_devdata *dd = filp->private_data;
2009 void __iomem *base;
2010 unsigned long total, data, csr_off;
2011 int in_lcb;
2012
2013 /* only write 8 byte quantities */
2014 if ((count % 8) != 0)
2015 return -EINVAL;
2016 /* offset must be 8-byte aligned */
2017 if ((*f_pos % 8) != 0)
2018 return -EINVAL;
2019 /* source buffer must be 8-byte aligned */
2020 if ((unsigned long)buf % 8 != 0)
2021 return -EINVAL;
2022 /* must be in range */
2023 if (*f_pos + count > dd->kregend - dd->kregbase)
2024 return -EINVAL;
2025
2026 base = (void __iomem *)dd->kregbase + *f_pos;
2027 csr_off = *f_pos;
2028 in_lcb = 0;
2029 for (total = 0; total < count; total += 8, csr_off += 8) {
2030 if (get_user(data, (unsigned long __user *)(buf + total)))
2031 break;
2032 /* accessing LCB CSRs requires a special procedure */
2033 if (is_lcb_offset(csr_off)) {
2034 if (!in_lcb) {
2035 int ret = acquire_lcb_access(dd, 1);
2036
2037 if (ret)
2038 break;
2039 in_lcb = 1;
2040 }
2041 } else {
2042 if (in_lcb) {
2043 release_lcb_access(dd, 1);
2044 in_lcb = 0;
2045 }
2046 }
2047 writeq(data, base + total);
2048 }
2049 if (in_lcb)
2050 release_lcb_access(dd, 1);
2051 *f_pos += total;
2052 return total;
2053 }
2054
2055 static const struct file_operations ui_file_ops = {
2056 .owner = THIS_MODULE,
2057 .llseek = ui_lseek,
2058 .read = ui_read,
2059 .write = ui_write,
2060 .open = ui_open,
2061 .release = ui_release,
2062 };
2063
2064 #define UI_OFFSET 192 /* device minor offset for UI devices */
2065 static int create_ui = 1;
2066
2067 static struct cdev wildcard_cdev;
2068 static struct device *wildcard_device;
2069
2070 static atomic_t user_count = ATOMIC_INIT(0);
2071
2072 static void user_remove(struct hfi1_devdata *dd)
2073 {
2074 if (atomic_dec_return(&user_count) == 0)
2075 hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2076
2077 hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2078 hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
2079 }
2080
2081 static int user_add(struct hfi1_devdata *dd)
2082 {
2083 char name[10];
2084 int ret;
2085
2086 if (atomic_inc_return(&user_count) == 1) {
2087 ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
2088 &wildcard_cdev, &wildcard_device,
2089 true);
2090 if (ret)
2091 goto done;
2092 }
2093
2094 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
2095 ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
2096 &dd->user_cdev, &dd->user_device,
2097 true);
2098 if (ret)
2099 goto done;
2100
2101 if (create_ui) {
2102 snprintf(name, sizeof(name),
2103 "%s_ui%d", class_name(), dd->unit);
2104 ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
2105 &dd->ui_cdev, &dd->ui_device,
2106 false);
2107 if (ret)
2108 goto done;
2109 }
2110
2111 return 0;
2112 done:
2113 user_remove(dd);
2114 return ret;
2115 }
2116
2117 /*
2118 * Create per-unit files in /dev
2119 */
2120 int hfi1_device_create(struct hfi1_devdata *dd)
2121 {
2122 int r, ret;
2123
2124 r = user_add(dd);
2125 ret = hfi1_diag_add(dd);
2126 if (r && !ret)
2127 ret = r;
2128 return ret;
2129 }
2130
2131 /*
2132 * Remove per-unit files in /dev
2133 * void, core kernel returns no errors for this stuff
2134 */
2135 void hfi1_device_remove(struct hfi1_devdata *dd)
2136 {
2137 user_remove(dd);
2138 hfi1_diag_remove(dd);
2139 }