/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
               (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
               MLX4_PERM_LOCAL_READ;
}
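/*
 * For example, convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ)
 * yields MLX4_PERM_REMOTE_READ | MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ;
 * local read permission is always granted.
 */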

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
        switch (type) {
        case IB_MW_TYPE_1:      return MLX4_MW_TYPE_1;
        case IB_MW_TYPE_2:      return MLX4_MW_TYPE_2;
        default:                return -1;
        }
}

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}
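/*
 * Note: the DMA MR above is created with iova 0, length ~0ull and zero MTT
 * pages, i.e. it spans the whole address space and needs no address
 * translation; access is still gated by the permission bits produced by
 * convert_access().
 */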

enum {
        MLX4_MAX_MTT_SHIFT = 31
};
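/*
 * MLX4_MAX_MTT_SHIFT caps the MTT page shift chosen by
 * mlx4_ib_umem_calc_optimal_mtt_size() below, so a single MTT entry is never
 * assumed to cover more than 2^31 bytes.
 */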

static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
                                        struct mlx4_mtt *mtt,
                                        u64 mtt_size, u64 mtt_shift, u64 len,
                                        u64 cur_start_addr, u64 *pages,
                                        int *start_index, int *npages)
{
        u64 cur_end_addr = cur_start_addr + len;
        u64 cur_end_addr_aligned = 0;
        u64 mtt_entries;
        int err = 0;
        int k;

        len += (cur_start_addr & (mtt_size - 1ULL));
        cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
        len += (cur_end_addr_aligned - cur_end_addr);
        if (len & (mtt_size - 1ULL)) {
                pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
                        len, mtt_size);
                return -EINVAL;
        }

        mtt_entries = (len >> mtt_shift);

        /*
         * Align the MTT start address to the mtt_size.
         * Required to handle cases when the MR starts in the middle of an MTT
         * record. Was not required in old code since the physical addresses
         * provided by the dma subsystem were page aligned, which was also the
         * MTT size.
         */
        cur_start_addr = round_down(cur_start_addr, mtt_size);
        /* A new block is started ... */
        for (k = 0; k < mtt_entries; ++k) {
                pages[*npages] = cur_start_addr + (mtt_size * k);
                (*npages)++;
                /*
                 * Be friendly to mlx4_write_mtt() and pass it chunks of
                 * appropriate size.
                 */
                if (*npages == PAGE_SIZE / sizeof(u64)) {
                        err = mlx4_write_mtt(dev->dev, mtt, *start_index,
                                             *npages, pages);
                        if (err)
                                return err;

                        (*start_index) += *npages;
                        *npages = 0;
                }
        }

        return 0;
}
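/*
 * Illustration of the rounding above: with mtt_size 0x1000, a block of
 * len 0x1000 starting at cur_start_addr 0x1800 straddles two MTT records,
 * so len is rounded up to 0x2000 and two entries (for 0x1000 and 0x2000)
 * are written.
 */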

static inline u64 alignment_of(u64 ptr)
{
        return ilog2(ptr & (~(ptr - 1)));
}
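/*
 * For example, alignment_of(0x203000) == 12: the lowest set bit is 0x1000,
 * so the address is 4KB-aligned but not 8KB-aligned.
 */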

static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
                                       u64 current_block_end,
                                       u64 block_shift)
{
        /* Check whether the new block is aligned as well as the previous
         * block was: its start address must have zeros in all bits below
         * the current entity size.
         */
        if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
                /*
                 * It is not as well aligned as the previous block - reduce
                 * the mtt size accordingly. Here we take the lowest bit
                 * that is set.
                 */
                block_shift = alignment_of(next_block_start);

        /*
         * Check whether the end of the previous block is aligned as well as
         * the start of the block.
         */
        if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
                /*
                 * It is not as well aligned as the start of the block -
                 * reduce the mtt size accordingly.
                 */
                block_shift = alignment_of(current_block_end);

        return block_shift;
}
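/*
 * Note that block_shift can only shrink here: it is clamped to the alignment
 * of the start of the new block and to the alignment of the end of the
 * previous one, so every block boundary remains MTT-size aligned.
 */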

int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        u64 *pages;
        u64 len = 0;
        int err = 0;
        u64 mtt_size;
        u64 cur_start_addr = 0;
        u64 mtt_shift;
        int start_index = 0;
        int npages = 0;
        struct scatterlist *sg;
        int i;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        mtt_shift = mtt->page_shift;
        mtt_size = 1ULL << mtt_shift;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                if (cur_start_addr + len == sg_dma_address(sg)) {
                        /* still the same block */
                        len += sg_dma_len(sg);
                        continue;
                }
                /*
                 * A new block is started ...
                 * If len is malaligned, write an extra mtt entry to cover the
                 * misaligned area (round up the division)
                 */
                err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
                                                   mtt_shift, len,
                                                   cur_start_addr,
                                                   pages, &start_index,
                                                   &npages);
                if (err)
                        goto out;

                cur_start_addr = sg_dma_address(sg);
                len = sg_dma_len(sg);
        }

        /* Handle the last block */
        if (len > 0) {
                /*
                 * If len is malaligned, write an extra mtt entry to cover
                 * the misaligned area (round up the division)
                 */
                err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
                                                   mtt_shift, len,
                                                   cur_start_addr, pages,
                                                   &start_index, &npages);
                if (err)
                        goto out;
        }

        if (npages)
                err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);

out:
        free_page((unsigned long) pages);
        return err;
}
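/*
 * Summary: contiguous scatterlist entries are merged into blocks, each block
 * is expanded into MTT entries by mlx4_ib_umem_write_mtt_block() (which
 * flushes the pages[] buffer to mlx4_write_mtt() whenever it holds
 * PAGE_SIZE / sizeof(u64) entries), and any remaining entries are written
 * after the loop.
 */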

/*
 * Calculate the optimal mtt size based on contiguous pages.
 * The function also returns the number of pages that are not aligned to the
 * calculated mtt_size, to be added to the total number of pages. For that we
 * check the first chunk length and the last chunk length: if either is not
 * aligned to mtt_size, the non_aligned_pages count is incremented. All chunks
 * in the middle are already handled as part of the mtt shift calculation for
 * both their start and end addresses.
 */
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
                                       int *num_of_mtts)
{
        u64 block_shift = MLX4_MAX_MTT_SHIFT;
        u64 min_shift = umem->page_shift;
        u64 last_block_aligned_end = 0;
        u64 current_block_start = 0;
        u64 first_block_start = 0;
        u64 current_block_len = 0;
        u64 last_block_end = 0;
        struct scatterlist *sg;
        u64 current_block_end;
        u64 misalignment_bits;
        u64 next_block_start;
        u64 total_len = 0;
        int i;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                /*
                 * Initialization - save the first chunk start as the
                 * current_block_start - block means contiguous pages.
                 */
                if (current_block_len == 0 && current_block_start == 0) {
                        current_block_start = sg_dma_address(sg);
                        first_block_start = current_block_start;
                        /*
                         * Find the bits that are different between the
                         * physical address and the virtual address for the
                         * start of the MR.
                         * umem_get aligned the start_va to a page boundary.
                         * Therefore, we need to align the start va to the
                         * same boundary.
                         * misalignment_bits is needed to handle the case of a
                         * single memory region. In this case, the rest of the
                         * logic will not reduce the block size. If we use a
                         * block size which is bigger than the alignment of the
                         * misalignment bits, we might use the virtual page
                         * number instead of the physical page number,
                         * resulting in access to the wrong data.
                         */
                        misalignment_bits =
                                (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
                                ^ current_block_start;
                        block_shift = min(alignment_of(misalignment_bits),
                                          block_shift);
                }

                /*
                 * Go over the scatter entries and check if they continue the
                 * previous scatter entry.
                 */
                next_block_start = sg_dma_address(sg);
                current_block_end = current_block_start + current_block_len;
                /* If we have a split (non-contig.) between two blocks */
                if (current_block_end != next_block_start) {
                        block_shift = mlx4_ib_umem_calc_block_mtt
                                        (next_block_start,
                                         current_block_end,
                                         block_shift);

                        /*
                         * If we reached the minimum shift for a 4k page we
                         * stop the loop.
                         */
                        if (block_shift <= min_shift)
                                goto end;

                        /*
                         * If not saved yet, we are in the first block - we
                         * save the length of the first block to calculate the
                         * non_aligned_pages number at the end.
                         */
                        total_len += current_block_len;

                        /* Start a new block */
                        current_block_start = next_block_start;
                        current_block_len = sg_dma_len(sg);
                        continue;
                }
                /* The scatter entry is another part of the current block,
                 * increase the block size.
                 * An entry in the scatter can be larger than 4k (page) as of
                 * dma mapping which merge some blocks together.
                 */
                current_block_len += sg_dma_len(sg);
        }

        /* Account for the last block in the total len */
        total_len += current_block_len;
        /* Add to the first block the misalignment that it suffers from. */
        total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
        last_block_end = current_block_start + current_block_len;
        last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
        total_len += (last_block_aligned_end - last_block_end);

        if (total_len & ((1ULL << block_shift) - 1ULL))
                pr_warn("misaligned total length detected (%llu, %llu)!",
                        total_len, block_shift);

        *num_of_mtts = total_len >> block_shift;
end:
        if (block_shift < min_shift) {
                /*
                 * If the shift is less than the minimum, set a warning and
                 * return the minimum shift.
                 */
                pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);

                block_shift = min_shift;
        }
        return block_shift;
}
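/*
 * The returned value is the MTT page shift to use for the region, between
 * umem->page_shift and MLX4_MAX_MTT_SHIFT. *num_of_mtts is rewritten with the
 * entry count for that shift only when the loop runs to completion; on the
 * early exit at the minimum shift the caller's preloaded per-page count is
 * kept.
 */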

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* Force registering the memory as writable. */
        /* Used for memory re-registration. HCA protects the access */
        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags | IB_ACCESS_LOCAL_WRITE, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                          u64 start, u64 length, u64 virt_addr,
                          int mr_access_flags, struct ib_pd *pd,
                          struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(mr->device);
        struct mlx4_ib_mr *mmr = to_mmr(mr);
        struct mlx4_mpt_entry *mpt_entry;
        struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
        int err;

        /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
         * we assume that the calls can't run concurrently. Otherwise, a
         * race exists.
         */
        err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);

        if (err)
                return err;

        if (flags & IB_MR_REREG_PD) {
                err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
                                           to_mpd(pd)->pdn);

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_ACCESS) {
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_TRANS) {
                int shift;
                int n;

                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem = ib_umem_get(mr->uobject->context, start, length,
                                        mr_access_flags |
                                        IB_ACCESS_LOCAL_WRITE,
                                        0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = mmr->umem->page_shift;

                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
                if (err) {
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
                mmr->mmr.iova = virt_addr;
                mmr->mmr.size = length;

                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
                        mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
        }

        /* If we couldn't transfer the MR to the HCA, just remember to
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
        if (!err && flags & IB_MR_REREG_ACCESS)
                mmr->mmr.access = mr_access_flags;

release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

        return err;
}

static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int ret;

        /* Ensure that size is aligned to DMA cacheline
         * requirements.
         * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
         * so page_map_size will never cross PAGE_SIZE.
         */
        mr->page_map_size = roundup(max_pages * sizeof(u64),
                                    MLX4_MR_PAGES_ALIGN);

        /* Prevent cross page boundary allocation. */
        mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
        if (!mr->pages)
                return -ENOMEM;

        mr->page_map = dma_map_single(device->dev.parent, mr->pages,
                                      mr->page_map_size, DMA_TO_DEVICE);

        if (dma_mapping_error(device->dev.parent, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        free_page((unsigned long)mr->pages);
        return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;

                dma_unmap_single(device->dev.parent, mr->page_map,
                                 mr->page_map_size, DMA_TO_DEVICE);
                free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mw *mw;
        int err;

        mw = kmalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
                            to_mlx4_type(type), &mw->mmw);
        if (err)
                goto err_free;

        err = mlx4_mw_enable(dev->dev, &mw->mmw);
        if (err)
                goto err_mw;

        mw->ibmw.rkey = mw->mmw.key;

        return &mw->ibmw;

err_mw:
        mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
        kfree(mw);

        return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
        struct mlx4_ib_mw *mw = to_mmw(ibmw);

        mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
        kfree(mw);

        return 0;
}

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}
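/*
 * Note on the err_free_pl path above: mr->ibmr.device is normally filled in
 * by the IB core only after this function returns, so it is set by hand
 * before calling mlx4_free_priv_pages(), which dereferences it to unmap the
 * page list.
 */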

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                 struct ib_fmr_attr *fmr_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_fmr *fmr;
        int err = -ENOMEM;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
                             fmr_attr->max_pages, fmr_attr->max_maps,
                             fmr_attr->page_shift, &fmr->mfmr);
        if (err)
                goto err_free;

        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
        if (err)
                goto err_mr;

        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

        return &fmr->ibfmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
        kfree(fmr);

        return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                         int npages, u64 iova)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *ibfmr;
        int err;
        struct mlx4_dev *mdev = NULL;

        list_for_each_entry(ibfmr, fmr_list, list) {
                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
                        return -EINVAL;
                mdev = to_mdev(ibfmr->device)->dev;
        }

        if (!mdev)
                return 0;

        list_for_each_entry(ibfmr, fmr_list, list) {
                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
        }

        /*
         * Make sure all MPT status updates are visible before issuing
         * SYNC_TPT firmware command.
         */
        wmb();

        err = mlx4_SYNC_TPT(mdev);
        if (err)
                pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

        return 0;
}
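/*
 * The first pass above only validates that every FMR on the list belongs to
 * the same device; the actual unmapping happens in the second pass, and the
 * SYNC_TPT flush is issued once after the write barrier.
 */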

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
        int err;

        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

        if (!err)
                kfree(ifmr);

        return err;
}

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}
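/*
 * mlx4_set_page() is the per-page callback handed to ib_sg_to_pages() in
 * mlx4_ib_map_mr_sg() below: each call appends one page address, tagged with
 * MLX4_MTT_FLAG_PRESENT, to the MR's page list, failing once max_pages is
 * reached.
 */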

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   mr->page_map_size, DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      mr->page_map_size, DMA_TO_DEVICE);

        return rc;
}
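/*
 * The DMA syncs bracket the CPU writes done through mlx4_set_page(): the
 * page-list buffer is handed back to the CPU, filled by ib_sg_to_pages(),
 * and then made visible to the device again. The return value is the number
 * of scatterlist entries that were mapped.
 */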