/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

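/*
 * Translate IB verbs access flags into mlx4 MPT permission bits.
 * MLX4_PERM_LOCAL_READ is always set: every memory region is at least
 * locally readable.
 */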
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) |
	       (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) |
	       MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}

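/*
 * Allocate a DMA MR covering the whole address space (start 0, length
 * ~0ull) with the requested access rights; no umem is attached since no
 * address translation is needed.
 */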
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

enum {
	MLX4_MAX_MTT_SHIFT = 31
};

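/*
 * Write the MTT entries covering one physically contiguous block.
 * The block is expanded to mtt_size alignment on both ends, and the
 * entries are flushed to the HCA in chunks of PAGE_SIZE / sizeof(u64)
 * via mlx4_write_mtt().
 */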
static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
					struct mlx4_mtt *mtt,
					u64 mtt_size, u64 mtt_shift, u64 len,
					u64 cur_start_addr, u64 *pages,
					int *start_index, int *npages)
{
	u64 cur_end_addr = cur_start_addr + len;
	u64 cur_end_addr_aligned = 0;
	u64 mtt_entries;
	int err = 0;
	int k;

	len += (cur_start_addr & (mtt_size - 1ULL));
	cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
	len += (cur_end_addr_aligned - cur_end_addr);
	if (len & (mtt_size - 1ULL)) {
		pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
			len, mtt_size);
		return -EINVAL;
	}

	mtt_entries = (len >> mtt_shift);

	/*
	 * Align the MTT start address to the mtt_size.
	 * Required to handle cases when the MR starts in the middle of an MTT
	 * record. Was not required in old code since the physical addresses
	 * provided by the dma subsystem were page aligned, which was also the
	 * MTT size.
	 */
	cur_start_addr = round_down(cur_start_addr, mtt_size);
	/* A new block is started ... */
	for (k = 0; k < mtt_entries; ++k) {
		pages[*npages] = cur_start_addr + (mtt_size * k);
		(*npages)++;
		/*
		 * Be friendly to mlx4_write_mtt() and pass it chunks of
		 * appropriate size.
		 */
		if (*npages == PAGE_SIZE / sizeof(u64)) {
			err = mlx4_write_mtt(dev->dev, mtt, *start_index,
					     *npages, pages);
			if (err)
				return err;

			(*start_index) += *npages;
			*npages = 0;
		}
	}

	return 0;
}

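/*
 * alignment_of() returns log2 of the largest power of two that divides
 * ptr, i.e. the position of its lowest set bit.
 */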
static inline u64 alignment_of(u64 ptr)
{
	return ilog2(ptr & (~(ptr - 1)));
}

static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
				       u64 current_block_end,
				       u64 block_shift)
{
	/* Check whether the new block is aligned as well as the previous
	 * block was.
	 * The block address must have zeros in its low bits up to the
	 * entity size.
	 */
	if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
		/*
		 * It is not as well aligned as the previous block - reduce
		 * the mtt size accordingly. Here we take the last right bit
		 * which is 1.
		 */
		block_shift = alignment_of(next_block_start);

	/*
	 * Check whether the end of the previous block is aligned as well as
	 * the start of the block.
	 */
	if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
		/*
		 * It is not as well aligned as the start of the block -
		 * reduce the mtt size accordingly.
		 */
		block_shift = alignment_of(current_block_end);

	return block_shift;
}

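/*
 * Walk the umem scatterlist, merge DMA-contiguous entries into blocks and
 * write the MTT entries for each block. A page-sized scratch buffer is
 * used to batch the entries handed to mlx4_write_mtt().
 */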
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	u64 len = 0;
	int err = 0;
	u64 mtt_size;
	u64 cur_start_addr = 0;
	u64 mtt_shift;
	int start_index = 0;
	int npages = 0;
	struct scatterlist *sg;
	int i;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	mtt_shift = mtt->page_shift;
	mtt_size = 1ULL << mtt_shift;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		if (cur_start_addr + len == sg_dma_address(sg)) {
			/* still the same block */
			len += sg_dma_len(sg);
			continue;
		}
		/*
		 * A new block is started ...
		 * If len is misaligned, write an extra mtt entry to cover the
		 * misaligned area (round up the division)
		 */
		err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
						   mtt_shift, len,
						   cur_start_addr,
						   pages, &start_index,
						   &npages);
		if (err)
			goto out;

		cur_start_addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	/* Handle the last block */
	if (len > 0) {
		/*
		 * If len is misaligned, write an extra mtt entry to cover
		 * the misaligned area (round up the division)
		 */
		err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
						   mtt_shift, len,
						   cur_start_addr, pages,
						   &start_index, &npages);
		if (err)
			goto out;
	}

	if (npages)
		err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

/*
 * Calculate the optimal mtt size based on contiguous pages.
 * The function also returns the number of pages that are not aligned to the
 * calculated mtt_size, to be added to the total number of pages. For that we
 * check the first chunk length and the last chunk length; if either is not
 * aligned to mtt_size, we increment the non_aligned_pages number. All chunks
 * in the middle are already handled as part of the mtt shift calculation for
 * both their start and end addresses.
 */
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
				       int *num_of_mtts)
{
	u64 block_shift = MLX4_MAX_MTT_SHIFT;
	u64 min_shift = umem->page_shift;
	u64 last_block_aligned_end = 0;
	u64 current_block_start = 0;
	u64 first_block_start = 0;
	u64 current_block_len = 0;
	u64 last_block_end = 0;
	struct scatterlist *sg;
	u64 current_block_end;
	u64 misalignment_bits;
	u64 next_block_start;
	u64 total_len = 0;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/*
		 * Initialization - save the first chunk start as the
		 * current_block_start - block means contiguous pages.
		 */
		if (current_block_len == 0 && current_block_start == 0) {
			current_block_start = sg_dma_address(sg);
			first_block_start = current_block_start;
			/*
			 * Find the bits that are different between the
			 * physical address and the virtual address for the
			 * start of the MR.
			 * umem_get aligned the start_va to a page boundary.
			 * Therefore, we need to align the start va to the
			 * same boundary.
			 * misalignment_bits is needed to handle the case of a
			 * single memory region. In this case, the rest of the
			 * logic will not reduce the block size. If we use a
			 * block size which is bigger than the alignment of
			 * the misalignment bits, we might use the virtual
			 * page number instead of the physical page number,
			 * resulting in access to the wrong data.
			 */
			misalignment_bits =
				(start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
				^ current_block_start;
			block_shift = min(alignment_of(misalignment_bits),
					  block_shift);
		}

		/*
		 * Go over the scatter entries and check if they continue the
		 * previous scatter entry.
		 */
		next_block_start = sg_dma_address(sg);
		current_block_end = current_block_start + current_block_len;
		/* If we have a split (non-contig.) between two blocks */
		if (current_block_end != next_block_start) {
			block_shift = mlx4_ib_umem_calc_block_mtt
					(next_block_start,
					 current_block_end,
					 block_shift);

			/*
			 * If we reached the minimum shift for 4k page we stop
			 * the loop.
			 */
			if (block_shift <= min_shift)
				goto end;

			/*
			 * If not saved yet we are in first block - we save
			 * the length of first block to calculate the
			 * non_aligned_pages number at the end.
			 */
			total_len += current_block_len;

			/* Start a new block */
			current_block_start = next_block_start;
			current_block_len = sg_dma_len(sg);
			continue;
		}
		/* The scatter entry is another part of the current block,
		 * increase the block size.
		 * An entry in the scatter can be larger than 4k (page) as of
		 * dma mapping which merge some blocks together.
		 */
		current_block_len += sg_dma_len(sg);
	}

	/* Account for the last block in the total len */
	total_len += current_block_len;
	/* Add to the first block the misalignment that it suffers from. */
	total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
	last_block_end = current_block_start + current_block_len;
	last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
	total_len += (last_block_aligned_end - last_block_end);

	if (total_len & ((1ULL << block_shift) - 1ULL))
		pr_warn("misaligned total length detected (%llu, %llu)!",
			total_len, block_shift);

	*num_of_mtts = total_len >> block_shift;
end:
	if (block_shift < min_shift) {
		/*
		 * If the shift is less than the min we print a warning and
		 * return the min shift.
		 */
		pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);

		block_shift = min_shift;
	}
	return block_shift;
}

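/*
 * Wrapper around ib_umem_get(): if the underlying mapping is writable, the
 * region is pinned with local write access even when the caller asked for
 * read-only, so a later rereg from readable to writable does not have to go
 * through ib_umem_get() again.
 */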
static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
					u64 length, u64 virt_addr,
					int access_flags)
{
	/*
	 * Force registering the memory as writable if the underlying pages
	 * are writable. This is so rereg can change the access permissions
	 * from readable to writable without having to run through
	 * ib_umem_get again
	 */
	if (!ib_access_writable(access_flags)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		/*
		 * FIXME: Ideally this would iterate over all the vmas that
		 * cover the memory, but for now it requires a single vma to
		 * entirely cover the MR to support RO mappings.
		 */
		vma = find_vma(current->mm, start);
		if (vma && vma->vm_end >= start + length &&
		    vma->vm_start <= start) {
			if (vma->vm_flags & VM_WRITE)
				access_flags |= IB_ACCESS_LOCAL_WRITE;
		} else {
			access_flags |= IB_ACCESS_LOCAL_WRITE;
		}

		up_read(&current->mm->mmap_sem);
	}

	return ib_umem_get(udata, start, length, access_flags, 0);
}

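/*
 * Register a user MR: pin the user memory, pick an optimal MTT page size
 * for the resulting scatterlist, allocate the MR and write its MTT
 * entries, then enable the region in hardware.
 */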
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem =
		mlx4_get_umem_mr(udata, start, length, virt_addr, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.length = length;
	mr->ibmr.iova = virt_addr;
	mr->ibmr.page_size = 1U << shift;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

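/*
 * Re-register an existing user MR. Depending on the IB_MR_REREG_* flags
 * this may move the MR to another PD, change its access rights (only if
 * the original umem allows it), or replace the translation with a newly
 * pinned umem. The MPT entry is fetched, modified in place and written
 * back at the end.
 */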
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
	 * we assume that the calls can't run concurrently. Otherwise, a
	 * race exists.
	 */
	err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);

	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
					   to_mpd(pd)->pdn);

		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		if (ib_access_writable(mr_access_flags) &&
		    !mmr->umem->writable) {
			err = -EPERM;
			goto release_mpt_entry;
		}

		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));

		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_TRANS) {
		int shift;
		int n;

		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
		mmr->umem = mlx4_get_umem_mr(udata, start, length, virt_addr,
					     mr_access_flags);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		n = ib_umem_page_count(mmr->umem);
		shift = mmr->umem->page_shift;

		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
					      virt_addr, length, n, shift,
					      *pmpt_entry);
		if (err) {
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
		mmr->mmr.iova = virt_addr;
		mmr->mmr.size = length;

		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
		if (err) {
			mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
	}

	/* If we couldn't transfer the MR to the HCA, just remember to
	 * return a failure. But dereg_mr will free the resources.
	 */
	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
	if (!err && flags & IB_MR_REREG_ACCESS)
		mmr->mmr.access = mr_access_flags;

release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

	return err;
}

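/*
 * Allocate the page list used for fast-register work requests. A whole
 * zeroed page is used so the list can never straddle a page boundary; it
 * is DMA-mapped once here and only synced around updates in
 * mlx4_ib_map_mr_sg().
 */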
static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int ret;

	/* Ensure that size is aligned to DMA cacheline
	 * requirements.
	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
	 * so page_map_size will never cross PAGE_SIZE.
	 */
	mr->page_map_size = roundup(max_pages * sizeof(u64),
				    MLX4_MR_PAGES_ALIGN);

	/* Prevent cross page boundary allocation. */
	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!mr->pages)
		return -ENOMEM;

	mr->page_map = dma_map_single(device->dev.parent, mr->pages,
				      mr->page_map_size, DMA_TO_DEVICE);

	if (dma_mapping_error(device->dev.parent, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	free_page((unsigned long)mr->pages);
	return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
	if (mr->pages) {
		struct ib_device *device = mr->ibmr.device;

		dma_unmap_single(device->dev.parent, mr->page_map,
				 mr->page_map_size, DMA_TO_DEVICE);
		free_page((unsigned long)mr->pages);
		mr->pages = NULL;
	}
}

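/*
 * Destroy an MR: release the fast-register page list (if any), free the
 * hardware MR and finally drop the pinned user memory.
 */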
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	mlx4_free_priv_pages(mr);

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

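/*
 * Memory window support: allocate an mlx4 MW of the requested type
 * (type 1 or type 2), enable it and expose its key as the ibmw rkey.
 */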
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;

	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;

	mw->ibmw.rkey = mw->mmw.key;

	return &mw->ibmw;

err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
	kfree(mw);

	return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);

	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);

	return 0;
}

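/*
 * Allocate an MR for fast registration (IB_MR_TYPE_MEM_REG only), backed
 * by a private page list of at most MLX4_MAX_FAST_REG_PAGES entries that
 * mlx4_ib_map_mr_sg() fills in later.
 */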
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;

	mr->max_pages = max_num_sg;
	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free_pl:
	mr->ibmr.device = pd->device;
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

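/*
 * Legacy FMR support: allocate an FMR with the limits given in fmr_attr
 * and enable it. Mapping, unmapping and freeing are handled by the
 * functions below.
 */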
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

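/*
 * Unmap a list of FMRs. All FMRs on the list must belong to the same
 * device; after they are all marked unmapped, a single SYNC_TPT command
 * flushes the change to the firmware.
 */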
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when "
			"unmapping FMRs\n", err);

	return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

	return 0;
}

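/*
 * Map a scatterlist into the MR's private page list using the generic
 * ib_sg_to_pages() helper with mlx4_set_page() as the per-page callback,
 * syncing the DMA-mapped list around the update.
 */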
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int rc;

	mr->npages = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
				   mr->page_map_size, DMA_TO_DEVICE);

	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
				      mr->page_map_size, DMA_TO_DEVICE);

	return rc;
}