drivers/infiniband/sw/rdmavt/mr.c
1 /*
2 * Copyright(c) 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/slab.h>
49 #include <linux/vmalloc.h>
50 #include <rdma/ib_umem.h>
51 #include <rdma/rdma_vt.h>
52 #include "vt.h"
53 #include "mr.h"
54 #include "trace.h"
55
56 /**
57 * rvt_driver_mr_init - Init MR resources per driver
58 * @rdi: rvt dev struct
59 *
60 * Do any initialization needed when a driver registers with rdmavt.
61 *
62 * Return: 0 on success or errno on failure
63 */
64 int rvt_driver_mr_init(struct rvt_dev_info *rdi)
65 {
66 unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
67 unsigned lk_tab_size;
68 int i;
69
70 /*
71 * The top lkey_table_size bits are used to index the
72 * table. The lower 8 bits can be owned by the user (copied from
73 * the LKEY). The remaining bits act as a generation number or tag.
74 */
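/*
 * Worked example (illustrative values only): with lkey_table_size = 16,
 * an LKEY built by rvt_alloc_lkey() below is laid out as
 *
 *	bits 31..16  table index r
 *	bits 15..8   generation number
 *	bits  7..0   zero, left for the user/driver
 */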
75 if (!lkey_table_size)
76 return -EINVAL;
77
78 spin_lock_init(&rdi->lkey_table.lock);
79
80 /* ensure generation is at least 4 bits */
81 if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
82 rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
83 lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
84 rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
85 lkey_table_size = rdi->dparms.lkey_table_size;
86 }
87 rdi->lkey_table.max = 1 << lkey_table_size;
88 rdi->lkey_table.shift = 32 - lkey_table_size;
89 lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
90 rdi->lkey_table.table = (struct rvt_mregion __rcu **)
91 vmalloc_node(lk_tab_size, rdi->dparms.node);
92 if (!rdi->lkey_table.table)
93 return -ENOMEM;
94
95 RCU_INIT_POINTER(rdi->dma_mr, NULL);
96 for (i = 0; i < rdi->lkey_table.max; i++)
97 RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
98
99 return 0;
100 }
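/*
 * Usage sketch (illustrative, not from any particular driver): a driver
 * picks the table size in its device parameters before registering,
 * for example
 *
 *	rdi->dparms.lkey_table_size = 16;
 *
 * and rvt_register_device() then calls rvt_driver_mr_init() as part of
 * registration; a size of zero fails and oversized values are clamped
 * as above.
 */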
101
102 /**
103 * rvt_mr_exit - clean up MR
104 * @rdi: rvt dev structure
105 *
106 * Called when drivers have unregistered or perhaps failed to register with us.
107 */
108 void rvt_mr_exit(struct rvt_dev_info *rdi)
109 {
110 if (rdi->dma_mr)
111 rvt_pr_err(rdi, "DMA MR not null!\n");
112
113 vfree(rdi->lkey_table.table);
114 }
115
116 static void rvt_deinit_mregion(struct rvt_mregion *mr)
117 {
118 int i = mr->mapsz;
119
120 mr->mapsz = 0;
121 while (i)
122 kfree(mr->map[--i]);
123 percpu_ref_exit(&mr->refcount);
124 }
125
126 static void __rvt_mregion_complete(struct percpu_ref *ref)
127 {
128 struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
129 refcount);
130
131 complete(&mr->comp);
132 }
133
134 static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
135 int count, unsigned int percpu_flags)
136 {
137 int m, i = 0;
138 struct rvt_dev_info *dev = ib_to_rvt(pd->device);
139
140 mr->mapsz = 0;
141 m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
142 for (; i < m; i++) {
143 mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
144 dev->dparms.node);
145 if (!mr->map[i])
146 goto bail;
147 mr->mapsz++;
148 }
149 init_completion(&mr->comp);
150 /* hold a reference to account for the pointer returned to the caller */
151 if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
152 percpu_flags, GFP_KERNEL))
153 goto bail;
154
155 atomic_set(&mr->lkey_invalid, 0);
156 mr->pd = pd;
157 mr->max_segs = count;
158 return 0;
159 bail:
160 rvt_deinit_mregion(mr);
161 return -ENOMEM;
162 }
163
164 /**
165 * rvt_alloc_lkey - allocate an lkey
166 * @mr: memory region that this lkey protects
167 * @dma_region: 0->normal key, 1->restricted DMA key
168 *
169 * Returns 0 if successful, otherwise returns -errno.
170 *
171 * Increments mr reference count as required.
172 *
173 * Sets the lkey field of mr for non-dma regions.
174 *
175 */
176 static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
177 {
178 unsigned long flags;
179 u32 r;
180 u32 n;
181 int ret = 0;
182 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
183 struct rvt_lkey_table *rkt = &dev->lkey_table;
184
185 rvt_get_mr(mr);
186 spin_lock_irqsave(&rkt->lock, flags);
187
188 /* special case for dma_mr lkey == 0 */
189 if (dma_region) {
190 struct rvt_mregion *tmr;
191
192 tmr = rcu_access_pointer(dev->dma_mr);
193 if (!tmr) {
194 mr->lkey_published = 1;
195 /* Ensure published is written first */
196 rcu_assign_pointer(dev->dma_mr, mr);
197 rvt_get_mr(mr);
198 }
199 goto success;
200 }
201
202 /* Find the next available LKEY */
203 r = rkt->next;
204 n = r;
205 for (;;) {
206 if (!rcu_access_pointer(rkt->table[r]))
207 break;
208 r = (r + 1) & (rkt->max - 1);
209 if (r == n)
210 goto bail;
211 }
212 rkt->next = (r + 1) & (rkt->max - 1);
213 /*
214 * Make sure lkey is never zero which is reserved to indicate an
215 * unrestricted LKEY.
216 */
217 rkt->gen++;
218 /*
219 * bits are capped to ensure enough bits for generation number
220 */
221 mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
222 ((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
223 << 8);
224 if (mr->lkey == 0) {
225 mr->lkey |= 1 << 8;
226 rkt->gen++;
227 }
228 mr->lkey_published = 1;
229 /* Ensure published is written first */
230 rcu_assign_pointer(rkt->table[r], mr);
231 success:
232 spin_unlock_irqrestore(&rkt->lock, flags);
233 out:
234 return ret;
235 bail:
236 rvt_put_mr(mr);
237 spin_unlock_irqrestore(&rkt->lock, flags);
238 ret = -ENOMEM;
239 goto out;
240 }
241
242 /**
243 * rvt_free_lkey - free an lkey
244 * @mr: mr to free from tables
245 */
246 static void rvt_free_lkey(struct rvt_mregion *mr)
247 {
248 unsigned long flags;
249 u32 lkey = mr->lkey;
250 u32 r;
251 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
252 struct rvt_lkey_table *rkt = &dev->lkey_table;
253 int freed = 0;
254
255 spin_lock_irqsave(&rkt->lock, flags);
256 if (!lkey) {
257 if (mr->lkey_published) {
258 mr->lkey_published = 0;
259 /* ensure published is written before pointer */
260 rcu_assign_pointer(dev->dma_mr, NULL);
261 rvt_put_mr(mr);
262 }
263 } else {
264 if (!mr->lkey_published)
265 goto out;
266 r = lkey >> (32 - dev->dparms.lkey_table_size);
267 mr->lkey_published = 0;
268 /* ensure published is written before pointer */
269 rcu_assign_pointer(rkt->table[r], NULL);
270 }
271 freed++;
272 out:
273 spin_unlock_irqrestore(&rkt->lock, flags);
274 if (freed)
275 percpu_ref_kill(&mr->refcount);
276 }
277
278 static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
279 {
280 struct rvt_mr *mr;
281 int rval = -ENOMEM;
282 int m;
283
284 /* Allocate struct plus pointers to first level page tables. */
285 m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
286 mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
287 if (!mr)
288 goto bail;
289
290 rval = rvt_init_mregion(&mr->mr, pd, count, 0);
291 if (rval)
292 goto bail;
293 /*
294 * ib_reg_phys_mr() will initialize mr->ibmr except for
295 * lkey and rkey.
296 */
297 rval = rvt_alloc_lkey(&mr->mr, 0);
298 if (rval)
299 goto bail_mregion;
300 mr->ibmr.lkey = mr->mr.lkey;
301 mr->ibmr.rkey = mr->mr.lkey;
302 done:
303 return mr;
304
305 bail_mregion:
306 rvt_deinit_mregion(&mr->mr);
307 bail:
308 kfree(mr);
309 mr = ERR_PTR(rval);
310 goto done;
311 }
312
313 static void __rvt_free_mr(struct rvt_mr *mr)
314 {
315 rvt_free_lkey(&mr->mr);
316 rvt_deinit_mregion(&mr->mr);
317 kfree(mr);
318 }
319
320 /**
321 * rvt_get_dma_mr - get a DMA memory region
322 * @pd: protection domain for this memory region
323 * @acc: access flags
324 *
325 * Return: the memory region on success, otherwise returns an errno.
326 * Note that all DMA addresses should be created via the functions in
327 * struct dma_virt_ops.
328 */
329 struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
330 {
331 struct rvt_mr *mr;
332 struct ib_mr *ret;
333 int rval;
334
335 if (ibpd_to_rvtpd(pd)->user)
336 return ERR_PTR(-EPERM);
337
338 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
339 if (!mr) {
340 ret = ERR_PTR(-ENOMEM);
341 goto bail;
342 }
343
344 rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
345 if (rval) {
346 ret = ERR_PTR(rval);
347 goto bail;
348 }
349
350 rval = rvt_alloc_lkey(&mr->mr, 1);
351 if (rval) {
352 ret = ERR_PTR(rval);
353 goto bail_mregion;
354 }
355
356 mr->mr.access_flags = acc;
357 ret = &mr->ibmr;
358 done:
359 return ret;
360
361 bail_mregion:
362 rvt_deinit_mregion(&mr->mr);
363 bail:
364 kfree(mr);
365 goto done;
366 }
367
368 /**
369 * rvt_reg_user_mr - register a userspace memory region
370 * @pd: protection domain for this memory region
371 * @start: starting userspace address
372 * @length: length of region to register
373 * @mr_access_flags: access flags for this memory region
374 * @udata: unused by the driver
375 *
376 * Return: the memory region on success, otherwise returns an errno.
377 */
378 struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
379 u64 virt_addr, int mr_access_flags,
380 struct ib_udata *udata)
381 {
382 struct rvt_mr *mr;
383 struct ib_umem *umem;
384 struct sg_page_iter sg_iter;
385 int n, m;
386 struct ib_mr *ret;
387
388 if (length == 0)
389 return ERR_PTR(-EINVAL);
390
391 umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
392 if (IS_ERR(umem))
393 return (void *)umem;
394
395 n = umem->nmap;
396
397 mr = __rvt_alloc_mr(n, pd);
398 if (IS_ERR(mr)) {
399 ret = (struct ib_mr *)mr;
400 goto bail_umem;
401 }
402
403 mr->mr.user_base = start;
404 mr->mr.iova = virt_addr;
405 mr->mr.length = length;
406 mr->mr.offset = ib_umem_offset(umem);
407 mr->mr.access_flags = mr_access_flags;
408 mr->umem = umem;
409
410 mr->mr.page_shift = PAGE_SHIFT;
411 m = 0;
412 n = 0;
413 for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
414 void *vaddr;
415
416 vaddr = page_address(sg_page_iter_page(&sg_iter));
417 if (!vaddr) {
418 ret = ERR_PTR(-EINVAL);
419 goto bail_inval;
420 }
421 mr->mr.map[m]->segs[n].vaddr = vaddr;
422 mr->mr.map[m]->segs[n].length = PAGE_SIZE;
423 trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
424 if (++n == RVT_SEGSZ) {
425 m++;
426 n = 0;
427 }
428 }
429 return &mr->ibmr;
430
431 bail_inval:
432 __rvt_free_mr(mr);
433
434 bail_umem:
435 ib_umem_release(umem);
436
437 return ret;
438 }
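/*
 * Path sketch (illustrative): a userspace ibv_reg_mr() call reaches this
 * function through the uverbs REG_MR command, roughly
 *
 *	ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE)
 *		-> ib_uverbs -> device->ops.reg_user_mr == rvt_reg_user_mr
 *
 * The umem pins the pages; each page's kernel virtual address is stored
 * in mr->mr.map[m]->segs[n] for later lookup by rvt_lkey_ok() and
 * rvt_rkey_ok().
 */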
439
440 /**
441 * rvt_dereg_clean_qp_cb - callback from iterator
442 * @qp: the qp
443 * @v: the mregion (as u64)
444 *
445 * This routine fields the callback for all QPs and, for
446 * QPs in the same PD as the MR, calls
447 * rvt_qp_mr_clean() to potentially clean up references.
448 */
449 static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
450 {
451 struct rvt_mregion *mr = (struct rvt_mregion *)v;
452
453 /* skip PDs that are not ours */
454 if (mr->pd != qp->ibqp.pd)
455 return;
456 rvt_qp_mr_clean(qp, mr->lkey);
457 }
458
459 /**
460 * rvt_dereg_clean_qps - find QPs for reference cleanup
461 * @mr: the MR that is being deregistered
462 *
463 * This routine iterates over RC QPs looking for references
464 * to the lkey noted in @mr.
465 */
466 static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
467 {
468 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
469
470 rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
471 }
472
473 /**
474 * rvt_check_refs - check references
475 * @mr: the mregion
476 * @t: the caller identification
477 *
478 * This routine checks whether an MR still holds references
479 * while it is being de-registered.
480 *
481 * If the count is non-zero, the code calls a clean routine and then
482 * waits up to a timeout for the count to drop to zero.
483 */
484 static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
485 {
486 unsigned long timeout;
487 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
488
489 if (mr->lkey) {
490 /* avoid dma mr */
491 rvt_dereg_clean_qps(mr);
492 /* @mr was indexed on rcu protected @lkey_table */
493 synchronize_rcu();
494 }
495
496 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
497 if (!timeout) {
498 rvt_pr_err(rdi,
499 "%s timeout mr %p pd %p lkey %x refcount %ld\n",
500 t, mr, mr->pd, mr->lkey,
501 atomic_long_read(&mr->refcount.count));
502 rvt_get_mr(mr);
503 return -EBUSY;
504 }
505 return 0;
506 }
507
508 /**
509 * rvt_mr_has_lkey - does this MR have the given lkey
510 * @mr: the mregion
511 * @lkey: the lkey
512 */
513 bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
514 {
515 return mr && lkey == mr->lkey;
516 }
517
518 /**
519 * rvt_ss_has_lkey - is the lkey referenced by this sge state
520 * @ss: the sge state
521 * @lkey: the lkey to look for
522 *
523 * This code tests for an MR with the given lkey in the
524 * indicated sge state.
525 */
526 bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
527 {
528 int i;
529 bool rval = false;
530
531 if (!ss->num_sge)
532 return rval;
533 /* first one */
534 rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
535 /* any others */
536 for (i = 0; !rval && i < ss->num_sge - 1; i++)
537 rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
538 return rval;
539 }
540
541 /**
542 * rvt_dereg_mr - unregister and free a memory region
543 * @ibmr: the memory region to free
544 *
545 *
546 * Note that this is called to free MRs created by rvt_get_dma_mr()
547 * or rvt_reg_user_mr().
548 *
549 * Returns 0 on success.
550 */
551 int rvt_dereg_mr(struct ib_mr *ibmr)
552 {
553 struct rvt_mr *mr = to_imr(ibmr);
554 int ret;
555
556 rvt_free_lkey(&mr->mr);
557
558 rvt_put_mr(&mr->mr); /* will set completion if last */
559 ret = rvt_check_refs(&mr->mr, __func__);
560 if (ret)
561 goto out;
562 rvt_deinit_mregion(&mr->mr);
563 if (mr->umem)
564 ib_umem_release(mr->umem);
565 kfree(mr);
566 out:
567 return ret;
568 }
569
570 /**
571 * rvt_alloc_mr - Allocate a memory region usable for fast registration
572 * @pd: protection domain for this memory region
573 * @mr_type: mem region type
574 * @max_num_sg: Max number of segments allowed
575 *
576 * Return: the memory region on success, otherwise return an errno.
577 */
578 struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
579 enum ib_mr_type mr_type,
580 u32 max_num_sg)
581 {
582 struct rvt_mr *mr;
583
584 if (mr_type != IB_MR_TYPE_MEM_REG)
585 return ERR_PTR(-EINVAL);
586
587 mr = __rvt_alloc_mr(max_num_sg, pd);
588 if (IS_ERR(mr))
589 return (struct ib_mr *)mr;
590
591 return &mr->ibmr;
592 }
593
594 /**
595 * rvt_set_page - page assignment function called by ib_sg_to_pages
596 * @ibmr: memory region
597 * @addr: dma address of mapped page
598 *
599 * Return: 0 on success
600 */
601 static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
602 {
603 struct rvt_mr *mr = to_imr(ibmr);
604 u32 ps = 1 << mr->mr.page_shift;
605 u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
606 int m, n;
607
608 if (unlikely(mapped_segs == mr->mr.max_segs))
609 return -ENOMEM;
610
611 if (mr->mr.length == 0) {
612 mr->mr.user_base = addr;
613 mr->mr.iova = addr;
614 }
615
616 m = mapped_segs / RVT_SEGSZ;
617 n = mapped_segs % RVT_SEGSZ;
618 mr->mr.map[m]->segs[n].vaddr = (void *)addr;
619 mr->mr.map[m]->segs[n].length = ps;
620 trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
621 mr->mr.length += ps;
622
623 return 0;
624 }
625
626 /**
627 * rvt_map_mr_sg - map an sg list onto the memory region
628 * @ibmr: memory region
629 * @sg: dma mapped scatterlist
630 * @sg_nents: number of entries in sg
631 * @sg_offset: offset in bytes into sg
632 *
633 * Return: number of sg elements mapped to the memory region
634 */
635 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
636 int sg_nents, unsigned int *sg_offset)
637 {
638 struct rvt_mr *mr = to_imr(ibmr);
639
640 mr->mr.length = 0;
641 mr->mr.page_shift = PAGE_SHIFT;
642 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
643 rvt_set_page);
644 }
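/*
 * Usage sketch (illustrative): a kernel ULP normally drives this through
 * the core verbs API, e.g.
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sg);
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *
 * For rdmavt devices ib_map_mr_sg() lands here, and ib_sg_to_pages()
 * then calls rvt_set_page() once per page-sized chunk of the DMA-mapped
 * scatterlist.
 */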
645
646 /**
647 * rvt_fast_reg_mr - fast register physical MR
648 * @qp: the queue pair where the work request comes from
649 * @ibmr: the memory region to be registered
650 * @key: updated key for this memory region
651 * @access: access flags for this memory region
652 *
653 * Returns 0 on success.
654 */
655 int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
656 int access)
657 {
658 struct rvt_mr *mr = to_imr(ibmr);
659
660 if (qp->ibqp.pd != mr->mr.pd)
661 return -EACCES;
662
663 /* not applicable to dma MR or user MR */
664 if (!mr->mr.lkey || mr->umem)
665 return -EINVAL;
666
667 if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
668 return -EINVAL;
669
670 ibmr->lkey = key;
671 ibmr->rkey = key;
672 mr->mr.lkey = key;
673 mr->mr.access_flags = access;
674 atomic_set(&mr->mr.lkey_invalid, 0);
675
676 return 0;
677 }
678 EXPORT_SYMBOL(rvt_fast_reg_mr);
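/*
 * Call-site sketch (illustrative): a driver's send path handles an
 * IB_WR_REG_MR work request with something along the lines of
 *
 *	ret = rvt_fast_reg_mr(qp, wqe->reg_wr.mr, wqe->reg_wr.key,
 *			      wqe->reg_wr.access);
 *
 * so the PD and key checks above run in the sender's context before the
 * updated key becomes usable.
 */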
679
680 /**
681 * rvt_invalidate_rkey - invalidate an MR rkey
682 * @qp: queue pair associated with the invalidate op
683 * @rkey: rkey to invalidate
684 *
685 * Returns 0 on success.
686 */
687 int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
688 {
689 struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
690 struct rvt_lkey_table *rkt = &dev->lkey_table;
691 struct rvt_mregion *mr;
692
693 if (rkey == 0)
694 return -EINVAL;
695
696 rcu_read_lock();
697 mr = rcu_dereference(
698 rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
699 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
700 goto bail;
701
702 atomic_set(&mr->lkey_invalid, 1);
703 rcu_read_unlock();
704 return 0;
705
706 bail:
707 rcu_read_unlock();
708 return -EINVAL;
709 }
710 EXPORT_SYMBOL(rvt_invalidate_rkey);
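/*
 * Call-site sketch (illustrative): drivers call this when executing an
 * IB_WR_LOCAL_INV work request or when a Send with Invalidate arrives,
 * for example
 *
 *	ret = rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey);
 *
 * Once the key is marked invalid, rvt_lkey_ok()/rvt_rkey_ok() reject it
 * until the MR is registered again via rvt_fast_reg_mr().
 */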
711
712 /**
713 * rvt_alloc_fmr - allocate a fast memory region
714 * @pd: the protection domain for this memory region
715 * @mr_access_flags: access flags for this memory region
716 * @fmr_attr: fast memory region attributes
717 *
718 * Return: the memory region on success, otherwise returns an errno.
719 */
720 struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
721 struct ib_fmr_attr *fmr_attr)
722 {
723 struct rvt_fmr *fmr;
724 int m;
725 struct ib_fmr *ret;
726 int rval = -ENOMEM;
727
728 /* Allocate struct plus pointers to first level page tables. */
729 m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
730 fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
731 if (!fmr)
732 goto bail;
733
734 rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
735 PERCPU_REF_INIT_ATOMIC);
736 if (rval)
737 goto bail;
738
739 /*
740 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
741 * rkey.
742 */
743 rval = rvt_alloc_lkey(&fmr->mr, 0);
744 if (rval)
745 goto bail_mregion;
746 fmr->ibfmr.rkey = fmr->mr.lkey;
747 fmr->ibfmr.lkey = fmr->mr.lkey;
748 /*
749 * Resources are allocated but no valid mapping (RKEY can't be
750 * used).
751 */
752 fmr->mr.access_flags = mr_access_flags;
753 fmr->mr.max_segs = fmr_attr->max_pages;
754 fmr->mr.page_shift = fmr_attr->page_shift;
755
756 ret = &fmr->ibfmr;
757 done:
758 return ret;
759
760 bail_mregion:
761 rvt_deinit_mregion(&fmr->mr);
762 bail:
763 kfree(fmr);
764 ret = ERR_PTR(rval);
765 goto done;
766 }
767
768 /**
769 * rvt_map_phys_fmr - set up a fast memory region
770 * @ibfmr: the fast memory region to set up
771 * @page_list: the list of pages to associate with the fast memory region
772 * @list_len: the number of pages to associate with the fast memory region
773 * @iova: the virtual address of the start of the fast memory region
774 *
775 * This may be called from interrupt context.
776 *
777 * Return: 0 on success
778 */
779
780 int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
781 int list_len, u64 iova)
782 {
783 struct rvt_fmr *fmr = to_ifmr(ibfmr);
784 struct rvt_lkey_table *rkt;
785 unsigned long flags;
786 int m, n;
787 unsigned long i;
788 u32 ps;
789 struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
790
791 i = atomic_long_read(&fmr->mr.refcount.count);
792 if (i > 2)
793 return -EBUSY;
794
795 if (list_len > fmr->mr.max_segs)
796 return -EINVAL;
797
798 rkt = &rdi->lkey_table;
799 spin_lock_irqsave(&rkt->lock, flags);
800 fmr->mr.user_base = iova;
801 fmr->mr.iova = iova;
802 ps = 1 << fmr->mr.page_shift;
803 fmr->mr.length = list_len * ps;
804 m = 0;
805 n = 0;
806 for (i = 0; i < list_len; i++) {
807 fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
808 fmr->mr.map[m]->segs[n].length = ps;
809 trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
810 if (++n == RVT_SEGSZ) {
811 m++;
812 n = 0;
813 }
814 }
815 spin_unlock_irqrestore(&rkt->lock, flags);
816 return 0;
817 }
818
819 /**
820 * rvt_unmap_fmr - unmap fast memory regions
821 * @fmr_list: the list of fast memory regions to unmap
822 *
823 * Return: 0 on success.
824 */
825 int rvt_unmap_fmr(struct list_head *fmr_list)
826 {
827 struct rvt_fmr *fmr;
828 struct rvt_lkey_table *rkt;
829 unsigned long flags;
830 struct rvt_dev_info *rdi;
831
832 list_for_each_entry(fmr, fmr_list, ibfmr.list) {
833 rdi = ib_to_rvt(fmr->ibfmr.device);
834 rkt = &rdi->lkey_table;
835 spin_lock_irqsave(&rkt->lock, flags);
836 fmr->mr.user_base = 0;
837 fmr->mr.iova = 0;
838 fmr->mr.length = 0;
839 spin_unlock_irqrestore(&rkt->lock, flags);
840 }
841 return 0;
842 }
843
844 /**
845 * rvt_dealloc_fmr - deallocate a fast memory region
846 * @ibfmr: the fast memory region to deallocate
847 *
848 * Return: 0 on success.
849 */
850 int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
851 {
852 struct rvt_fmr *fmr = to_ifmr(ibfmr);
853 int ret = 0;
854
855 rvt_free_lkey(&fmr->mr);
856 rvt_put_mr(&fmr->mr); /* will set completion if last */
857 ret = rvt_check_refs(&fmr->mr, __func__);
858 if (ret)
859 goto out;
860 rvt_deinit_mregion(&fmr->mr);
861 kfree(fmr);
862 out:
863 return ret;
864 }
865
866 /**
867 * rvt_sge_adjacent - can the new SGE be merged into the last one
868 * @last_sge: last outgoing SGE written
869 * @sge: SGE to check
870 *
871 * If adjacent, last_sge is updated to add the length.
872 *
873 * Return: true if sge is adjacent to last_sge
874 */
875 static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
876 struct ib_sge *sge)
877 {
878 if (last_sge && sge->lkey == last_sge->mr->lkey &&
879 ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
880 if (sge->lkey) {
881 if (unlikely((sge->addr - last_sge->mr->user_base +
882 sge->length > last_sge->mr->length)))
883 return false; /* overrun, caller will catch */
884 } else {
885 last_sge->length += sge->length;
886 }
887 last_sge->sge_length += sge->length;
888 trace_rvt_sge_adjacent(last_sge, sge);
889 return true;
890 }
891 return false;
892 }
893
894 /**
895 * rvt_lkey_ok - check IB SGE for validity and initialize
896 * @rkt: table containing lkey to check SGE against
897 * @pd: protection domain
898 * @isge: outgoing internal SGE
899 * @last_sge: last outgoing SGE written
900 * @sge: SGE to check
901 * @acc: access flags
902 *
903 * Check the IB SGE for validity and initialize our internal version
904 * of it.
905 *
906 * Increments the reference count when a new sge is stored.
907 *
908 * Return: 0 if compressed, 1 if added, otherwise returns -errno.
909 */
910 int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
911 struct rvt_sge *isge, struct rvt_sge *last_sge,
912 struct ib_sge *sge, int acc)
913 {
914 struct rvt_mregion *mr;
915 unsigned n, m;
916 size_t off;
917
918 /*
919 * We use LKEY == zero for kernel virtual addresses
920 * (see rvt_get_dma_mr() and dma_virt_ops).
921 */
922 if (sge->lkey == 0) {
923 struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
924
925 if (pd->user)
926 return -EINVAL;
927 if (rvt_sge_adjacent(last_sge, sge))
928 return 0;
929 rcu_read_lock();
930 mr = rcu_dereference(dev->dma_mr);
931 if (!mr)
932 goto bail;
933 rvt_get_mr(mr);
934 rcu_read_unlock();
935
936 isge->mr = mr;
937 isge->vaddr = (void *)sge->addr;
938 isge->length = sge->length;
939 isge->sge_length = sge->length;
940 isge->m = 0;
941 isge->n = 0;
942 goto ok;
943 }
944 if (rvt_sge_adjacent(last_sge, sge))
945 return 0;
946 rcu_read_lock();
947 mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
948 if (!mr)
949 goto bail;
950 rvt_get_mr(mr);
951 if (!READ_ONCE(mr->lkey_published))
952 goto bail_unref;
953
954 if (unlikely(atomic_read(&mr->lkey_invalid) ||
955 mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
956 goto bail_unref;
957
958 off = sge->addr - mr->user_base;
959 if (unlikely(sge->addr < mr->user_base ||
960 off + sge->length > mr->length ||
961 (mr->access_flags & acc) != acc))
962 goto bail_unref;
963 rcu_read_unlock();
964
965 off += mr->offset;
966 if (mr->page_shift) {
967 /*
968 * page sizes are uniform power of 2 so no loop is necessary
969 * entries_spanned_by_off is the number of times the loop below
970 * would have executed.
971 */
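/*
 * Worked example (illustrative values only): with 4 KiB pages
 * (page_shift = 12) and off = 0x5064, entries_spanned_by_off = 5
 * and off becomes 0x64, so the target segment is
 * map[5 / RVT_SEGSZ]->segs[5 % RVT_SEGSZ].
 */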
972 size_t entries_spanned_by_off;
973
974 entries_spanned_by_off = off >> mr->page_shift;
975 off -= (entries_spanned_by_off << mr->page_shift);
976 m = entries_spanned_by_off / RVT_SEGSZ;
977 n = entries_spanned_by_off % RVT_SEGSZ;
978 } else {
979 m = 0;
980 n = 0;
981 while (off >= mr->map[m]->segs[n].length) {
982 off -= mr->map[m]->segs[n].length;
983 n++;
984 if (n >= RVT_SEGSZ) {
985 m++;
986 n = 0;
987 }
988 }
989 }
990 isge->mr = mr;
991 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
992 isge->length = mr->map[m]->segs[n].length - off;
993 isge->sge_length = sge->length;
994 isge->m = m;
995 isge->n = n;
996 ok:
997 trace_rvt_sge_new(isge, sge);
998 return 1;
999 bail_unref:
1000 rvt_put_mr(mr);
1001 bail:
1002 rcu_read_unlock();
1003 return -EINVAL;
1004 }
1005 EXPORT_SYMBOL(rvt_lkey_ok);
1006
1007 /**
1008 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
1009 * @qp: qp for validation
1010 * @sge: SGE state
1011 * @len: length of data
1012 * @vaddr: virtual address to place data
1013 * @rkey: rkey to check
1014 * @acc: access flags
1015 *
1016 * Return: 1 if successful, otherwise 0.
1017 *
1018 * increments the reference count upon success
1019 */
1020 int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
1021 u32 len, u64 vaddr, u32 rkey, int acc)
1022 {
1023 struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
1024 struct rvt_lkey_table *rkt = &dev->lkey_table;
1025 struct rvt_mregion *mr;
1026 unsigned n, m;
1027 size_t off;
1028
1029 /*
1030 * We use RKEY == zero for kernel virtual addresses
1031 * (see rvt_get_dma_mr() and dma_virt_ops).
1032 */
1033 rcu_read_lock();
1034 if (rkey == 0) {
1035 struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
1036 struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);
1037
1038 if (pd->user)
1039 goto bail;
1040 mr = rcu_dereference(rdi->dma_mr);
1041 if (!mr)
1042 goto bail;
1043 rvt_get_mr(mr);
1044 rcu_read_unlock();
1045
1046 sge->mr = mr;
1047 sge->vaddr = (void *)vaddr;
1048 sge->length = len;
1049 sge->sge_length = len;
1050 sge->m = 0;
1051 sge->n = 0;
1052 goto ok;
1053 }
1054
1055 mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
1056 if (!mr)
1057 goto bail;
1058 rvt_get_mr(mr);
1059 /* ensure mr read is before test */
1060 if (!READ_ONCE(mr->lkey_published))
1061 goto bail_unref;
1062 if (unlikely(atomic_read(&mr->lkey_invalid) ||
1063 mr->lkey != rkey || qp->ibqp.pd != mr->pd))
1064 goto bail_unref;
1065
1066 off = vaddr - mr->iova;
1067 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
1068 (mr->access_flags & acc) == 0))
1069 goto bail_unref;
1070 rcu_read_unlock();
1071
1072 off += mr->offset;
1073 if (mr->page_shift) {
1074 /*
1075 * page sizes are uniform power of 2 so no loop is necessary
1076 * entries_spanned_by_off is the number of times the loop below
1077 * would have executed.
1078 */
1079 size_t entries_spanned_by_off;
1080
1081 entries_spanned_by_off = off >> mr->page_shift;
1082 off -= (entries_spanned_by_off << mr->page_shift);
1083 m = entries_spanned_by_off / RVT_SEGSZ;
1084 n = entries_spanned_by_off % RVT_SEGSZ;
1085 } else {
1086 m = 0;
1087 n = 0;
1088 while (off >= mr->map[m]->segs[n].length) {
1089 off -= mr->map[m]->segs[n].length;
1090 n++;
1091 if (n >= RVT_SEGSZ) {
1092 m++;
1093 n = 0;
1094 }
1095 }
1096 }
1097 sge->mr = mr;
1098 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
1099 sge->length = mr->map[m]->segs[n].length - off;
1100 sge->sge_length = len;
1101 sge->m = m;
1102 sge->n = n;
1103 ok:
1104 return 1;
1105 bail_unref:
1106 rvt_put_mr(mr);
1107 bail:
1108 rcu_read_unlock();
1109 return 0;
1110 }
1111 EXPORT_SYMBOL(rvt_rkey_ok);
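/*
 * Call-site sketch (illustrative): a responder validates an incoming RDMA
 * or atomic request before touching memory, along the lines of
 *
 *	if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, len, vaddr, rkey,
 *				  IB_ACCESS_REMOTE_WRITE)))
 *		goto nack_acc;
 *
 * On success the SGE holds a reference on the MR, which is dropped via
 * rvt_put_mr() when the SGE is released.
 */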