1 /*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 #include <rdma/ib_umem.h>
43
44 #include <asm/current.h>
45
46 #include "ehca_iverbs.h"
47 #include "ehca_mrmw.h"
48 #include "hcp_if.h"
49 #include "hipz_hw.h"
50
51 #define NUM_CHUNKS(length, chunk_size) \
52 	(((length) + (chunk_size) - 1) / (chunk_size))
53 /* max number of rpages (per hcall register_rpages) */
54 #define MAX_RPAGES 512
55
56 static struct kmem_cache *mr_cache;
57 static struct kmem_cache *mw_cache;
58
59 static struct ehca_mr *ehca_mr_new(void)
60 {
61 struct ehca_mr *me;
62
63 me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
64 if (me) {
65 spin_lock_init(&me->mrlock);
66 } else
67 ehca_gen_err("alloc failed");
68
69 return me;
70 }
71
72 static void ehca_mr_delete(struct ehca_mr *me)
73 {
74 kmem_cache_free(mr_cache, me);
75 }
76
77 static struct ehca_mw *ehca_mw_new(void)
78 {
79 struct ehca_mw *me;
80
81 me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
82 if (me) {
83 spin_lock_init(&me->mwlock);
84 } else
85 ehca_gen_err("alloc failed");
86
87 return me;
88 }
89
90 static void ehca_mw_delete(struct ehca_mw *me)
91 {
92 kmem_cache_free(mw_cache, me);
93 }
94
95 /*----------------------------------------------------------------------*/
96
97 struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
98 {
99 struct ib_mr *ib_mr;
100 int ret;
101 struct ehca_mr *e_maxmr;
102 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
103 struct ehca_shca *shca =
104 container_of(pd->device, struct ehca_shca, ib_device);
105
106 if (shca->maxmr) {
107 e_maxmr = ehca_mr_new();
108 if (!e_maxmr) {
109 ehca_err(&shca->ib_device, "out of memory");
110 ib_mr = ERR_PTR(-ENOMEM);
111 goto get_dma_mr_exit0;
112 }
113
114 ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
115 mr_access_flags, e_pd,
116 &e_maxmr->ib.ib_mr.lkey,
117 &e_maxmr->ib.ib_mr.rkey);
118 if (ret) {
119 ehca_mr_delete(e_maxmr);
120 ib_mr = ERR_PTR(ret);
121 goto get_dma_mr_exit0;
122 }
123 ib_mr = &e_maxmr->ib.ib_mr;
124 } else {
125 		ehca_err(&shca->ib_device, "no internal max-MR exists!");
126 ib_mr = ERR_PTR(-EINVAL);
127 goto get_dma_mr_exit0;
128 }
129
130 get_dma_mr_exit0:
131 if (IS_ERR(ib_mr))
132 ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
133 PTR_ERR(ib_mr), pd, mr_access_flags);
134 return ib_mr;
135 } /* end ehca_get_dma_mr() */
136
137 /*----------------------------------------------------------------------*/
138
139 struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
140 struct ib_phys_buf *phys_buf_array,
141 int num_phys_buf,
142 int mr_access_flags,
143 u64 *iova_start)
144 {
145 struct ib_mr *ib_mr;
146 int ret;
147 struct ehca_mr *e_mr;
148 struct ehca_shca *shca =
149 container_of(pd->device, struct ehca_shca, ib_device);
150 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
151
152 u64 size;
153
154 if ((num_phys_buf <= 0) || !phys_buf_array) {
155 ehca_err(pd->device, "bad input values: num_phys_buf=%x "
156 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
157 ib_mr = ERR_PTR(-EINVAL);
158 goto reg_phys_mr_exit0;
159 }
160 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
161 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
162 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
163 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
164 /*
165 * Remote Write Access requires Local Write Access
166 * Remote Atomic Access requires Local Write Access
167 */
168 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
169 mr_access_flags);
170 ib_mr = ERR_PTR(-EINVAL);
171 goto reg_phys_mr_exit0;
172 }
173
174 /* check physical buffer list and calculate size */
175 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
176 iova_start, &size);
177 if (ret) {
178 ib_mr = ERR_PTR(ret);
179 goto reg_phys_mr_exit0;
180 }
181 if ((size == 0) ||
182 (((u64)iova_start + size) < (u64)iova_start)) {
183 ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
184 size, iova_start);
185 ib_mr = ERR_PTR(-EINVAL);
186 goto reg_phys_mr_exit0;
187 }
188
189 e_mr = ehca_mr_new();
190 if (!e_mr) {
191 ehca_err(pd->device, "out of memory");
192 ib_mr = ERR_PTR(-ENOMEM);
193 goto reg_phys_mr_exit0;
194 }
195
196 /* register MR on HCA */
197 if (ehca_mr_is_maxmr(size, iova_start)) {
198 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
199 ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
200 e_pd, &e_mr->ib.ib_mr.lkey,
201 &e_mr->ib.ib_mr.rkey);
202 if (ret) {
203 ib_mr = ERR_PTR(ret);
204 goto reg_phys_mr_exit1;
205 }
206 } else {
207 struct ehca_mr_pginfo pginfo;
208 u32 num_kpages;
209 u32 num_hwpages;
210
211 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
212 PAGE_SIZE);
213 num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
214 size, EHCA_PAGESIZE);
215 memset(&pginfo, 0, sizeof(pginfo));
216 pginfo.type = EHCA_MR_PGI_PHYS;
217 pginfo.num_kpages = num_kpages;
218 pginfo.num_hwpages = num_hwpages;
219 pginfo.u.phy.num_phys_buf = num_phys_buf;
220 pginfo.u.phy.phys_buf_array = phys_buf_array;
221 pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
222 EHCA_PAGESIZE);
223
224 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
225 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
226 &e_mr->ib.ib_mr.rkey);
227 if (ret) {
228 ib_mr = ERR_PTR(ret);
229 goto reg_phys_mr_exit1;
230 }
231 }
232
233 /* successful registration of all pages */
234 return &e_mr->ib.ib_mr;
235
236 reg_phys_mr_exit1:
237 ehca_mr_delete(e_mr);
238 reg_phys_mr_exit0:
239 if (IS_ERR(ib_mr))
240 ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
241 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
242 PTR_ERR(ib_mr), pd, phys_buf_array,
243 num_phys_buf, mr_access_flags, iova_start);
244 return ib_mr;
245 } /* end ehca_reg_phys_mr() */
246
247 /*----------------------------------------------------------------------*/
248
249 struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
250 int mr_access_flags, struct ib_udata *udata)
251 {
252 struct ib_mr *ib_mr;
253 struct ehca_mr *e_mr;
254 struct ehca_shca *shca =
255 container_of(pd->device, struct ehca_shca, ib_device);
256 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
257 struct ehca_mr_pginfo pginfo;
258 int ret;
259 u32 num_kpages;
260 u32 num_hwpages;
261
262 if (!pd) {
263 ehca_gen_err("bad pd=%p", pd);
264 return ERR_PTR(-EFAULT);
265 }
266
267 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
268 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
269 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
270 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
271 /*
272 * Remote Write Access requires Local Write Access
273 * Remote Atomic Access requires Local Write Access
274 */
275 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
276 mr_access_flags);
277 ib_mr = ERR_PTR(-EINVAL);
278 goto reg_user_mr_exit0;
279 }
280
281 if (length == 0 || virt + length < virt) {
282 ehca_err(pd->device, "bad input values: length=%lx "
283 "virt_base=%lx", length, virt);
284 ib_mr = ERR_PTR(-EINVAL);
285 goto reg_user_mr_exit0;
286 }
287
288 e_mr = ehca_mr_new();
289 if (!e_mr) {
290 ehca_err(pd->device, "out of memory");
291 ib_mr = ERR_PTR(-ENOMEM);
292 goto reg_user_mr_exit0;
293 }
294
295 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
296 mr_access_flags);
297 if (IS_ERR(e_mr->umem)) {
298 ib_mr = (void *) e_mr->umem;
299 goto reg_user_mr_exit1;
300 }
301
302 if (e_mr->umem->page_size != PAGE_SIZE) {
303 ehca_err(pd->device, "page size not supported, "
304 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
305 ib_mr = ERR_PTR(-EINVAL);
306 goto reg_user_mr_exit2;
307 }
308
309 /* determine number of MR pages */
310 num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
311 num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
312 EHCA_PAGESIZE);
313
314 /* register MR on HCA */
315 memset(&pginfo, 0, sizeof(pginfo));
316 pginfo.type = EHCA_MR_PGI_USER;
317 pginfo.num_kpages = num_kpages;
318 pginfo.num_hwpages = num_hwpages;
319 pginfo.u.usr.region = e_mr->umem;
320 pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
321 pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
322 (&e_mr->umem->chunk_list),
323 list);
324
325 ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
326 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
327 if (ret) {
328 ib_mr = ERR_PTR(ret);
329 goto reg_user_mr_exit2;
330 }
331
332 /* successful registration of all pages */
333 return &e_mr->ib.ib_mr;
334
335 reg_user_mr_exit2:
336 ib_umem_release(e_mr->umem);
337 reg_user_mr_exit1:
338 ehca_mr_delete(e_mr);
339 reg_user_mr_exit0:
340 if (IS_ERR(ib_mr))
341 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
342 " udata=%p",
343 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
344 return ib_mr;
345 } /* end ehca_reg_user_mr() */
346
347 /*----------------------------------------------------------------------*/
348
349 int ehca_rereg_phys_mr(struct ib_mr *mr,
350 int mr_rereg_mask,
351 struct ib_pd *pd,
352 struct ib_phys_buf *phys_buf_array,
353 int num_phys_buf,
354 int mr_access_flags,
355 u64 *iova_start)
356 {
357 int ret;
358
359 struct ehca_shca *shca =
360 container_of(mr->device, struct ehca_shca, ib_device);
361 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
362 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
363 u64 new_size;
364 u64 *new_start;
365 u32 new_acl;
366 struct ehca_pd *new_pd;
367 u32 tmp_lkey, tmp_rkey;
368 unsigned long sl_flags;
369 u32 num_kpages = 0;
370 u32 num_hwpages = 0;
371 struct ehca_mr_pginfo pginfo;
372 u32 cur_pid = current->tgid;
373
374 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
375 (my_pd->ownpid != cur_pid)) {
376 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
377 cur_pid, my_pd->ownpid);
378 ret = -EINVAL;
379 goto rereg_phys_mr_exit0;
380 }
381
382 if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
383 /* TODO not supported, because PHYP rereg hCall needs pages */
384 ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
385 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
386 ret = -EINVAL;
387 goto rereg_phys_mr_exit0;
388 }
389
390 if (mr_rereg_mask & IB_MR_REREG_PD) {
391 if (!pd) {
392 ehca_err(mr->device, "rereg with bad pd, pd=%p "
393 "mr_rereg_mask=%x", pd, mr_rereg_mask);
394 ret = -EINVAL;
395 goto rereg_phys_mr_exit0;
396 }
397 }
398
399 if ((mr_rereg_mask &
400 ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
401 (mr_rereg_mask == 0)) {
402 ret = -EINVAL;
403 goto rereg_phys_mr_exit0;
404 }
405
406 /* check other parameters */
407 if (e_mr == shca->maxmr) {
408 /* should be impossible, however reject to be sure */
409 ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
410 "shca->maxmr=%p mr->lkey=%x",
411 mr, shca->maxmr, mr->lkey);
412 ret = -EINVAL;
413 goto rereg_phys_mr_exit0;
414 }
415 if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
416 if (e_mr->flags & EHCA_MR_FLAG_FMR) {
417 ehca_err(mr->device, "not supported for FMR, mr=%p "
418 "flags=%x", mr, e_mr->flags);
419 ret = -EINVAL;
420 goto rereg_phys_mr_exit0;
421 }
422 if (!phys_buf_array || num_phys_buf <= 0) {
423 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
424 " phys_buf_array=%p num_phys_buf=%x",
425 mr_rereg_mask, phys_buf_array, num_phys_buf);
426 ret = -EINVAL;
427 goto rereg_phys_mr_exit0;
428 }
429 }
430 if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
431 (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
432 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
433 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
434 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
435 /*
436 * Remote Write Access requires Local Write Access
437 * Remote Atomic Access requires Local Write Access
438 */
439 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
440 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
441 ret = -EINVAL;
442 goto rereg_phys_mr_exit0;
443 }
444
445 /* set requested values dependent on rereg request */
446 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
447 new_start = e_mr->start; /* new == old address */
448 new_size = e_mr->size; /* new == old length */
449 new_acl = e_mr->acl; /* new == old access control */
450 	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); /* new == old PD */
451
452 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
453 new_start = iova_start; /* change address */
454 /* check physical buffer list and calculate size */
455 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
456 num_phys_buf, iova_start,
457 &new_size);
458 if (ret)
459 goto rereg_phys_mr_exit1;
460 if ((new_size == 0) ||
461 (((u64)iova_start + new_size) < (u64)iova_start)) {
462 ehca_err(mr->device, "bad input values: new_size=%lx "
463 "iova_start=%p", new_size, iova_start);
464 ret = -EINVAL;
465 goto rereg_phys_mr_exit1;
466 }
467 num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
468 new_size, PAGE_SIZE);
469 num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
470 new_size, EHCA_PAGESIZE);
471 memset(&pginfo, 0, sizeof(pginfo));
472 pginfo.type = EHCA_MR_PGI_PHYS;
473 pginfo.num_kpages = num_kpages;
474 pginfo.num_hwpages = num_hwpages;
475 pginfo.u.phy.num_phys_buf = num_phys_buf;
476 pginfo.u.phy.phys_buf_array = phys_buf_array;
477 pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
478 EHCA_PAGESIZE);
479 }
480 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
481 new_acl = mr_access_flags;
482 if (mr_rereg_mask & IB_MR_REREG_PD)
483 new_pd = container_of(pd, struct ehca_pd, ib_pd);
484
485 ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
486 new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
487 if (ret)
488 goto rereg_phys_mr_exit1;
489
490 /* successful reregistration */
491 if (mr_rereg_mask & IB_MR_REREG_PD)
492 mr->pd = pd;
493 mr->lkey = tmp_lkey;
494 mr->rkey = tmp_rkey;
495
496 rereg_phys_mr_exit1:
497 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
498 rereg_phys_mr_exit0:
499 if (ret)
500 ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
501 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
502 "iova_start=%p",
503 ret, mr, mr_rereg_mask, pd, phys_buf_array,
504 num_phys_buf, mr_access_flags, iova_start);
505 return ret;
506 } /* end ehca_rereg_phys_mr() */
507
508 /*----------------------------------------------------------------------*/
509
510 int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
511 {
512 int ret = 0;
513 u64 h_ret;
514 struct ehca_shca *shca =
515 container_of(mr->device, struct ehca_shca, ib_device);
516 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
517 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
518 u32 cur_pid = current->tgid;
519 unsigned long sl_flags;
520 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
521
522 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
523 (my_pd->ownpid != cur_pid)) {
524 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
525 cur_pid, my_pd->ownpid);
526 ret = -EINVAL;
527 goto query_mr_exit0;
528 }
529
530 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
531 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
532 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
533 ret = -EINVAL;
534 goto query_mr_exit0;
535 }
536
537 memset(mr_attr, 0, sizeof(struct ib_mr_attr));
538 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
539
540 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
541 if (h_ret != H_SUCCESS) {
542 ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
543 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
544 h_ret, mr, shca->ipz_hca_handle.handle,
545 e_mr->ipz_mr_handle.handle, mr->lkey);
546 ret = ehca2ib_return_code(h_ret);
547 goto query_mr_exit1;
548 }
549 mr_attr->pd = mr->pd;
550 mr_attr->device_virt_addr = hipzout.vaddr;
551 mr_attr->size = hipzout.len;
552 mr_attr->lkey = hipzout.lkey;
553 mr_attr->rkey = hipzout.rkey;
554 ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
555
556 query_mr_exit1:
557 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
558 query_mr_exit0:
559 if (ret)
560 ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
561 ret, mr, mr_attr);
562 return ret;
563 } /* end ehca_query_mr() */
564
565 /*----------------------------------------------------------------------*/
566
567 int ehca_dereg_mr(struct ib_mr *mr)
568 {
569 int ret = 0;
570 u64 h_ret;
571 struct ehca_shca *shca =
572 container_of(mr->device, struct ehca_shca, ib_device);
573 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
574 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
575 u32 cur_pid = current->tgid;
576
577 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
578 (my_pd->ownpid != cur_pid)) {
579 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
580 cur_pid, my_pd->ownpid);
581 ret = -EINVAL;
582 goto dereg_mr_exit0;
583 }
584
585 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
586 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
587 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
588 ret = -EINVAL;
589 goto dereg_mr_exit0;
590 } else if (e_mr == shca->maxmr) {
591 /* should be impossible, however reject to be sure */
592 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
593 "shca->maxmr=%p mr->lkey=%x",
594 mr, shca->maxmr, mr->lkey);
595 ret = -EINVAL;
596 goto dereg_mr_exit0;
597 }
598
599 /* TODO: BUSY: MR still has bound window(s) */
600 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
601 if (h_ret != H_SUCCESS) {
602 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
603 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
604 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
605 e_mr->ipz_mr_handle.handle, mr->lkey);
606 ret = ehca2ib_return_code(h_ret);
607 goto dereg_mr_exit0;
608 }
609
610 if (e_mr->umem)
611 ib_umem_release(e_mr->umem);
612
613 /* successful deregistration */
614 ehca_mr_delete(e_mr);
615
616 dereg_mr_exit0:
617 if (ret)
618 ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
619 return ret;
620 } /* end ehca_dereg_mr() */
621
622 /*----------------------------------------------------------------------*/
623
624 struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
625 {
626 struct ib_mw *ib_mw;
627 u64 h_ret;
628 struct ehca_mw *e_mw;
629 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
630 struct ehca_shca *shca =
631 container_of(pd->device, struct ehca_shca, ib_device);
632 struct ehca_mw_hipzout_parms hipzout = {{0},0};
633
634 e_mw = ehca_mw_new();
635 if (!e_mw) {
636 ib_mw = ERR_PTR(-ENOMEM);
637 goto alloc_mw_exit0;
638 }
639
640 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
641 e_pd->fw_pd, &hipzout);
642 if (h_ret != H_SUCCESS) {
643 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
644 "shca=%p hca_hndl=%lx mw=%p",
645 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
646 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
647 goto alloc_mw_exit1;
648 }
649 /* successful MW allocation */
650 e_mw->ipz_mw_handle = hipzout.handle;
651 e_mw->ib_mw.rkey = hipzout.rkey;
652 return &e_mw->ib_mw;
653
654 alloc_mw_exit1:
655 ehca_mw_delete(e_mw);
656 alloc_mw_exit0:
657 if (IS_ERR(ib_mw))
658 ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
659 return ib_mw;
660 } /* end ehca_alloc_mw() */
661
662 /*----------------------------------------------------------------------*/
663
664 int ehca_bind_mw(struct ib_qp *qp,
665 struct ib_mw *mw,
666 struct ib_mw_bind *mw_bind)
667 {
668 /* TODO: not supported up to now */
669 ehca_gen_err("bind MW currently not supported by HCAD");
670
671 return -EPERM;
672 } /* end ehca_bind_mw() */
673
674 /*----------------------------------------------------------------------*/
675
676 int ehca_dealloc_mw(struct ib_mw *mw)
677 {
678 u64 h_ret;
679 struct ehca_shca *shca =
680 container_of(mw->device, struct ehca_shca, ib_device);
681 struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
682
683 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
684 if (h_ret != H_SUCCESS) {
685 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
686 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
687 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
688 e_mw->ipz_mw_handle.handle);
689 return ehca2ib_return_code(h_ret);
690 }
691 /* successful deallocation */
692 ehca_mw_delete(e_mw);
693 return 0;
694 } /* end ehca_dealloc_mw() */
695
696 /*----------------------------------------------------------------------*/
697
698 struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
699 int mr_access_flags,
700 struct ib_fmr_attr *fmr_attr)
701 {
702 struct ib_fmr *ib_fmr;
703 struct ehca_shca *shca =
704 container_of(pd->device, struct ehca_shca, ib_device);
705 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
706 struct ehca_mr *e_fmr;
707 int ret;
708 u32 tmp_lkey, tmp_rkey;
709 struct ehca_mr_pginfo pginfo;
710
711 /* check other parameters */
712 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
713 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
714 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
715 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
716 /*
717 * Remote Write Access requires Local Write Access
718 * Remote Atomic Access requires Local Write Access
719 */
720 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
721 mr_access_flags);
722 ib_fmr = ERR_PTR(-EINVAL);
723 goto alloc_fmr_exit0;
724 }
725 if (mr_access_flags & IB_ACCESS_MW_BIND) {
726 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
727 mr_access_flags);
728 ib_fmr = ERR_PTR(-EINVAL);
729 goto alloc_fmr_exit0;
730 }
731 if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
732 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
733 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
734 fmr_attr->max_pages, fmr_attr->max_maps,
735 fmr_attr->page_shift);
736 ib_fmr = ERR_PTR(-EINVAL);
737 goto alloc_fmr_exit0;
738 }
739 if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
740 ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
741 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
742 fmr_attr->page_shift);
743 ib_fmr = ERR_PTR(-EINVAL);
744 goto alloc_fmr_exit0;
745 }
746
747 e_fmr = ehca_mr_new();
748 if (!e_fmr) {
749 ib_fmr = ERR_PTR(-ENOMEM);
750 goto alloc_fmr_exit0;
751 }
752 e_fmr->flags |= EHCA_MR_FLAG_FMR;
753
754 /* register MR on HCA */
755 memset(&pginfo, 0, sizeof(pginfo));
756 ret = ehca_reg_mr(shca, e_fmr, NULL,
757 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
758 mr_access_flags, e_pd, &pginfo,
759 &tmp_lkey, &tmp_rkey);
760 if (ret) {
761 ib_fmr = ERR_PTR(ret);
762 goto alloc_fmr_exit1;
763 }
764
765 /* successful */
766 e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
767 e_fmr->fmr_max_pages = fmr_attr->max_pages;
768 e_fmr->fmr_max_maps = fmr_attr->max_maps;
769 e_fmr->fmr_map_cnt = 0;
770 return &e_fmr->ib.ib_fmr;
771
772 alloc_fmr_exit1:
773 ehca_mr_delete(e_fmr);
774 alloc_fmr_exit0:
775 if (IS_ERR(ib_fmr))
776 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
777 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
778 mr_access_flags, fmr_attr);
779 return ib_fmr;
780 } /* end ehca_alloc_fmr() */
781
782 /*----------------------------------------------------------------------*/
783
784 int ehca_map_phys_fmr(struct ib_fmr *fmr,
785 u64 *page_list,
786 int list_len,
787 u64 iova)
788 {
789 int ret;
790 struct ehca_shca *shca =
791 container_of(fmr->device, struct ehca_shca, ib_device);
792 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
793 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
794 struct ehca_mr_pginfo pginfo;
795 u32 tmp_lkey, tmp_rkey;
796
797 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
798 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
799 e_fmr, e_fmr->flags);
800 ret = -EINVAL;
801 goto map_phys_fmr_exit0;
802 }
803 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
804 if (ret)
805 goto map_phys_fmr_exit0;
806 if (iova % e_fmr->fmr_page_size) {
807 /* only whole-numbered pages */
808 ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
809 iova, e_fmr->fmr_page_size);
810 ret = -EINVAL;
811 goto map_phys_fmr_exit0;
812 }
813 if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
814 		/* HCAD does not enforce the map limit; trace this anyway */
815 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
816 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
817 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
818 }
819
820 memset(&pginfo, 0, sizeof(pginfo));
821 pginfo.type = EHCA_MR_PGI_FMR;
822 pginfo.num_kpages = list_len;
823 pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
824 pginfo.u.fmr.page_list = page_list;
825 pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) /
826 EHCA_PAGESIZE);
827
828 ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
829 list_len * e_fmr->fmr_page_size,
830 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
831 if (ret)
832 goto map_phys_fmr_exit0;
833
834 /* successful reregistration */
835 e_fmr->fmr_map_cnt++;
836 e_fmr->ib.ib_fmr.lkey = tmp_lkey;
837 e_fmr->ib.ib_fmr.rkey = tmp_rkey;
838 return 0;
839
840 map_phys_fmr_exit0:
841 if (ret)
842 ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
843 "iova=%lx",
844 ret, fmr, page_list, list_len, iova);
845 return ret;
846 } /* end ehca_map_phys_fmr() */
847
848 /*----------------------------------------------------------------------*/
849
850 int ehca_unmap_fmr(struct list_head *fmr_list)
851 {
852 int ret = 0;
853 struct ib_fmr *ib_fmr;
854 struct ehca_shca *shca = NULL;
855 struct ehca_shca *prev_shca;
856 struct ehca_mr *e_fmr;
857 u32 num_fmr = 0;
858 u32 unmap_fmr_cnt = 0;
859
860 	/* check that all FMRs belong to the same SHCA, and check internal flag */
861 list_for_each_entry(ib_fmr, fmr_list, list) {
862 prev_shca = shca;
863 if (!ib_fmr) {
864 ehca_gen_err("bad fmr=%p in list", ib_fmr);
865 ret = -EINVAL;
866 goto unmap_fmr_exit0;
867 }
868 shca = container_of(ib_fmr->device, struct ehca_shca,
869 ib_device);
870 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
871 if ((shca != prev_shca) && prev_shca) {
872 ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
873 "prev_shca=%p e_fmr=%p",
874 shca, prev_shca, e_fmr);
875 ret = -EINVAL;
876 goto unmap_fmr_exit0;
877 }
878 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
879 ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
880 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
881 ret = -EINVAL;
882 goto unmap_fmr_exit0;
883 }
884 num_fmr++;
885 }
886
887 /* loop over all FMRs to unmap */
888 list_for_each_entry(ib_fmr, fmr_list, list) {
889 unmap_fmr_cnt++;
890 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
891 shca = container_of(ib_fmr->device, struct ehca_shca,
892 ib_device);
893 ret = ehca_unmap_one_fmr(shca, e_fmr);
894 if (ret) {
895 /* unmap failed, stop unmapping of rest of FMRs */
896 ehca_err(&shca->ib_device, "unmap of one FMR failed, "
897 "stop rest, e_fmr=%p num_fmr=%x "
898 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
899 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
900 goto unmap_fmr_exit0;
901 }
902 }
903
904 unmap_fmr_exit0:
905 if (ret)
906 ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
907 ret, fmr_list, num_fmr, unmap_fmr_cnt);
908 return ret;
909 } /* end ehca_unmap_fmr() */
910
911 /*----------------------------------------------------------------------*/
912
913 int ehca_dealloc_fmr(struct ib_fmr *fmr)
914 {
915 int ret;
916 u64 h_ret;
917 struct ehca_shca *shca =
918 container_of(fmr->device, struct ehca_shca, ib_device);
919 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
920
921 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
922 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
923 e_fmr, e_fmr->flags);
924 ret = -EINVAL;
925 goto free_fmr_exit0;
926 }
927
928 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
929 if (h_ret != H_SUCCESS) {
930 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
931 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
932 h_ret, e_fmr, shca->ipz_hca_handle.handle,
933 e_fmr->ipz_mr_handle.handle, fmr->lkey);
934 ret = ehca2ib_return_code(h_ret);
935 goto free_fmr_exit0;
936 }
937 /* successful deregistration */
938 ehca_mr_delete(e_fmr);
939 return 0;
940
941 free_fmr_exit0:
942 if (ret)
943 ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
944 return ret;
945 } /* end ehca_dealloc_fmr() */
946
947 /*----------------------------------------------------------------------*/
948
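/*
 * Register a memory region with the HCA: allocate the MR resource via
 * hipz_h_alloc_resource_mr, then register its pages with
 * ehca_reg_mr_rpages. If page registration fails, the MR resource is
 * freed again before returning.
 */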
949 int ehca_reg_mr(struct ehca_shca *shca,
950 struct ehca_mr *e_mr,
951 u64 *iova_start,
952 u64 size,
953 int acl,
954 struct ehca_pd *e_pd,
955 struct ehca_mr_pginfo *pginfo,
956 u32 *lkey, /*OUT*/
957 u32 *rkey) /*OUT*/
958 {
959 int ret;
960 u64 h_ret;
961 u32 hipz_acl;
962 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
963
964 ehca_mrmw_map_acl(acl, &hipz_acl);
965 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
966 if (ehca_use_hp_mr == 1)
967 hipz_acl |= 0x00000001;
968
969 h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
970 (u64)iova_start, size, hipz_acl,
971 e_pd->fw_pd, &hipzout);
972 if (h_ret != H_SUCCESS) {
973 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
974 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
975 ret = ehca2ib_return_code(h_ret);
976 goto ehca_reg_mr_exit0;
977 }
978
979 e_mr->ipz_mr_handle = hipzout.handle;
980
981 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
982 if (ret)
983 goto ehca_reg_mr_exit1;
984
985 /* successful registration */
986 e_mr->num_kpages = pginfo->num_kpages;
987 e_mr->num_hwpages = pginfo->num_hwpages;
988 e_mr->start = iova_start;
989 e_mr->size = size;
990 e_mr->acl = acl;
991 *lkey = hipzout.lkey;
992 *rkey = hipzout.rkey;
993 return 0;
994
995 ehca_reg_mr_exit1:
996 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
997 if (h_ret != H_SUCCESS) {
998 ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
999 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
1000 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
1001 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
1002 hipzout.lkey, pginfo, pginfo->num_kpages,
1003 pginfo->num_hwpages, ret);
1004 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
1005 "not recoverable");
1006 }
1007 ehca_reg_mr_exit0:
1008 if (ret)
1009 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1010 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1011 "num_kpages=%lx num_hwpages=%lx",
1012 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
1013 pginfo->num_kpages, pginfo->num_hwpages);
1014 return ret;
1015 } /* end ehca_reg_mr() */
1016
1017 /*----------------------------------------------------------------------*/
1018
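/*
 * Register the MR's hardware pages with the HCA, at most MAX_RPAGES
 * pages per hipz_h_register_rpage_mr call. Intermediate calls must
 * return H_PAGE_REGISTERED; the last call must return H_SUCCESS
 * ('registration complete').
 */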
1019 int ehca_reg_mr_rpages(struct ehca_shca *shca,
1020 struct ehca_mr *e_mr,
1021 struct ehca_mr_pginfo *pginfo)
1022 {
1023 int ret = 0;
1024 u64 h_ret;
1025 u32 rnum;
1026 u64 rpage;
1027 u32 i;
1028 u64 *kpage;
1029
1030 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1031 if (!kpage) {
1032 ehca_err(&shca->ib_device, "kpage alloc failed");
1033 ret = -ENOMEM;
1034 goto ehca_reg_mr_rpages_exit0;
1035 }
1036
1037 /* max 512 pages per shot */
1038 for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
1039
1040 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1041 rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
1042 if (rnum == 0)
1043 rnum = MAX_RPAGES; /* last shot is full */
1044 } else
1045 rnum = MAX_RPAGES;
1046
1047 if (rnum > 1) {
1048 ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
1049 if (ret) {
1050 ehca_err(&shca->ib_device, "ehca_set_pagebuf "
1051 "bad rc, ret=%x rnum=%x kpage=%p",
1052 ret, rnum, kpage);
1053 ret = -EFAULT;
1054 goto ehca_reg_mr_rpages_exit1;
1055 }
1056 rpage = virt_to_abs(kpage);
1057 if (!rpage) {
1058 ehca_err(&shca->ib_device, "kpage=%p i=%x",
1059 kpage, i);
1060 ret = -EFAULT;
1061 goto ehca_reg_mr_rpages_exit1;
1062 }
1063 } else { /* rnum==1 */
1064 ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
1065 if (ret) {
1066 ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
1067 "bad rc, ret=%x i=%x", ret, i);
1068 ret = -EFAULT;
1069 goto ehca_reg_mr_rpages_exit1;
1070 }
1071 }
1072
1073 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
1074 0, /* pagesize 4k */
1075 0, rpage, rnum);
1076
1077 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1078 /*
1079 * check for 'registration complete'==H_SUCCESS
1080 * and for 'page registered'==H_PAGE_REGISTERED
1081 */
1082 if (h_ret != H_SUCCESS) {
1083 ehca_err(&shca->ib_device, "last "
1084 "hipz_reg_rpage_mr failed, h_ret=%lx "
1085 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
1086 " lkey=%x", h_ret, e_mr, i,
1087 shca->ipz_hca_handle.handle,
1088 e_mr->ipz_mr_handle.handle,
1089 e_mr->ib.ib_mr.lkey);
1090 ret = ehca2ib_return_code(h_ret);
1091 break;
1092 } else
1093 ret = 0;
1094 } else if (h_ret != H_PAGE_REGISTERED) {
1095 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
1096 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
1097 "mr_hndl=%lx", h_ret, e_mr, i,
1098 e_mr->ib.ib_mr.lkey,
1099 shca->ipz_hca_handle.handle,
1100 e_mr->ipz_mr_handle.handle);
1101 ret = ehca2ib_return_code(h_ret);
1102 break;
1103 } else
1104 ret = 0;
1105 } /* end for(i) */
1106
1107
1108 ehca_reg_mr_rpages_exit1:
1109 ehca_free_fw_ctrlblock(kpage);
1110 ehca_reg_mr_rpages_exit0:
1111 if (ret)
1112 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
1113 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
1114 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
1115 return ret;
1116 } /* end ehca_reg_mr_rpages() */
1117
1118 /*----------------------------------------------------------------------*/
1119
1120 inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1121 struct ehca_mr *e_mr,
1122 u64 *iova_start,
1123 u64 size,
1124 u32 acl,
1125 struct ehca_pd *e_pd,
1126 struct ehca_mr_pginfo *pginfo,
1127 u32 *lkey, /*OUT*/
1128 u32 *rkey) /*OUT*/
1129 {
1130 int ret;
1131 u64 h_ret;
1132 u32 hipz_acl;
1133 u64 *kpage;
1134 u64 rpage;
1135 struct ehca_mr_pginfo pginfo_save;
1136 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1137
1138 ehca_mrmw_map_acl(acl, &hipz_acl);
1139 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1140
1141 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1142 if (!kpage) {
1143 ehca_err(&shca->ib_device, "kpage alloc failed");
1144 ret = -ENOMEM;
1145 goto ehca_rereg_mr_rereg1_exit0;
1146 }
1147
1148 pginfo_save = *pginfo;
1149 ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_hwpages, kpage);
1150 if (ret) {
1151 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
1152 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
1153 "kpage=%p", e_mr, pginfo, pginfo->type,
1154 pginfo->num_kpages, pginfo->num_hwpages, kpage);
1155 goto ehca_rereg_mr_rereg1_exit1;
1156 }
1157 rpage = virt_to_abs(kpage);
1158 if (!rpage) {
1159 ehca_err(&shca->ib_device, "kpage=%p", kpage);
1160 ret = -EFAULT;
1161 goto ehca_rereg_mr_rereg1_exit1;
1162 }
1163 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
1164 (u64)iova_start, size, hipz_acl,
1165 e_pd->fw_pd, rpage, &hipzout);
1166 if (h_ret != H_SUCCESS) {
1167 /*
1168 * reregistration unsuccessful, try it again with the 3 hCalls,
1169 * e.g. this is required in case H_MR_CONDITION
1170 * (MW bound or MR is shared)
1171 */
1172 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
1173 "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
1174 *pginfo = pginfo_save;
1175 ret = -EAGAIN;
1176 } else if ((u64*)hipzout.vaddr != iova_start) {
1177 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
1178 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
1179 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
1180 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
1181 e_mr->ib.ib_mr.lkey, hipzout.lkey);
1182 ret = -EFAULT;
1183 } else {
1184 /*
1185 * successful reregistration
1186 * note: start and start_out are identical for eServer HCAs
1187 */
1188 e_mr->num_kpages = pginfo->num_kpages;
1189 e_mr->num_hwpages = pginfo->num_hwpages;
1190 e_mr->start = iova_start;
1191 e_mr->size = size;
1192 e_mr->acl = acl;
1193 *lkey = hipzout.lkey;
1194 *rkey = hipzout.rkey;
1195 }
1196
1197 ehca_rereg_mr_rereg1_exit1:
1198 ehca_free_fw_ctrlblock(kpage);
1199 ehca_rereg_mr_rereg1_exit0:
1200 	if (ret && (ret != -EAGAIN))
1201 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
1202 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
1203 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
1204 pginfo->num_hwpages);
1205 return ret;
1206 } /* end ehca_rereg_mr_rereg1() */
1207
1208 /*----------------------------------------------------------------------*/
1209
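/*
 * Reregister an MR: use the single reregister hCall (Rereg1) where
 * possible; fall back to deregister + register (Rereg3) for MRs with
 * too many pages, for the internal max-MR, or if Rereg1 fails with
 * -EAGAIN.
 */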
1210 int ehca_rereg_mr(struct ehca_shca *shca,
1211 struct ehca_mr *e_mr,
1212 u64 *iova_start,
1213 u64 size,
1214 int acl,
1215 struct ehca_pd *e_pd,
1216 struct ehca_mr_pginfo *pginfo,
1217 u32 *lkey,
1218 u32 *rkey)
1219 {
1220 int ret = 0;
1221 u64 h_ret;
1222 int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
1223 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
1224
1225 /* first determine reregistration hCall(s) */
1226 if ((pginfo->num_hwpages > MAX_RPAGES) ||
1227 (e_mr->num_hwpages > MAX_RPAGES) ||
1228 (pginfo->num_hwpages > e_mr->num_hwpages)) {
1229 ehca_dbg(&shca->ib_device, "Rereg3 case, "
1230 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
1231 pginfo->num_hwpages, e_mr->num_hwpages);
1232 rereg_1_hcall = 0;
1233 rereg_3_hcall = 1;
1234 }
1235
1236 if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
1237 rereg_1_hcall = 0;
1238 rereg_3_hcall = 1;
1239 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
1240 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
1241 e_mr);
1242 }
1243
1244 if (rereg_1_hcall) {
1245 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
1246 acl, e_pd, pginfo, lkey, rkey);
1247 if (ret) {
1248 if (ret == -EAGAIN)
1249 rereg_3_hcall = 1;
1250 else
1251 goto ehca_rereg_mr_exit0;
1252 }
1253 }
1254
1255 if (rereg_3_hcall) {
1256 struct ehca_mr save_mr;
1257
1258 /* first deregister old MR */
1259 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1260 if (h_ret != H_SUCCESS) {
1261 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1262 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
1263 "mr->lkey=%x",
1264 h_ret, e_mr, shca->ipz_hca_handle.handle,
1265 e_mr->ipz_mr_handle.handle,
1266 e_mr->ib.ib_mr.lkey);
1267 ret = ehca2ib_return_code(h_ret);
1268 goto ehca_rereg_mr_exit0;
1269 }
1270 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1271 save_mr = *e_mr;
1272 ehca_mr_deletenew(e_mr);
1273
1274 /* set some MR values */
1275 e_mr->flags = save_mr.flags;
1276 e_mr->fmr_page_size = save_mr.fmr_page_size;
1277 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1278 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1279 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1280
1281 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1282 e_pd, pginfo, lkey, rkey);
1283 if (ret) {
1284 u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1285 memcpy(&e_mr->flags, &(save_mr.flags),
1286 sizeof(struct ehca_mr) - offset);
1287 goto ehca_rereg_mr_exit0;
1288 }
1289 }
1290
1291 ehca_rereg_mr_exit0:
1292 if (ret)
1293 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1294 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1295 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
1296 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1297 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1298 rereg_1_hcall, rereg_3_hcall);
1299 return ret;
1300 } /* end ehca_rereg_mr() */
1301
1302 /*----------------------------------------------------------------------*/
1303
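/*
 * Unmap one FMR: if the FMR is small enough, reset it with the
 * reregister hCall using length 0; otherwise (or if that fails) free
 * the FMR resource and register it again with an empty page info.
 */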
1304 int ehca_unmap_one_fmr(struct ehca_shca *shca,
1305 struct ehca_mr *e_fmr)
1306 {
1307 int ret = 0;
1308 u64 h_ret;
1309 	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
1310 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
1311 struct ehca_pd *e_pd =
1312 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1313 struct ehca_mr save_fmr;
1314 u32 tmp_lkey, tmp_rkey;
1315 struct ehca_mr_pginfo pginfo;
1316 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1317
1318 /* first check if reregistration hCall can be used for unmap */
1319 if (e_fmr->fmr_max_pages > MAX_RPAGES) {
1320 rereg_1_hcall = 0;
1321 rereg_3_hcall = 1;
1322 }
1323
1324 if (rereg_1_hcall) {
1325 /*
1326 * note: after using rereg hcall with len=0,
1327 * rereg hcall must be used again for registering pages
1328 */
1329 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1330 0, 0, e_pd->fw_pd, 0, &hipzout);
1331 if (h_ret != H_SUCCESS) {
1332 /*
1333 * should not happen, because length checked above,
1334 * FMRs are not shared and no MW bound to FMRs
1335 */
1336 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1337 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
1338 "mr_hndl=%lx lkey=%x lkey_out=%x",
1339 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1340 e_fmr->ipz_mr_handle.handle,
1341 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1342 rereg_3_hcall = 1;
1343 } else {
1344 /* successful reregistration */
1345 e_fmr->start = NULL;
1346 e_fmr->size = 0;
1347 tmp_lkey = hipzout.lkey;
1348 tmp_rkey = hipzout.rkey;
1349 }
1350 }
1351
1352 if (rereg_3_hcall) {
1353 struct ehca_mr save_mr;
1354
1355 /* first free old FMR */
1356 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1357 if (h_ret != H_SUCCESS) {
1358 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1359 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
1360 "lkey=%x",
1361 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1362 e_fmr->ipz_mr_handle.handle,
1363 e_fmr->ib.ib_fmr.lkey);
1364 ret = ehca2ib_return_code(h_ret);
1365 goto ehca_unmap_one_fmr_exit0;
1366 }
1367 /* clean ehca_mr_t, without changing lock */
1368 save_fmr = *e_fmr;
1369 ehca_mr_deletenew(e_fmr);
1370
1371 /* set some MR values */
1372 e_fmr->flags = save_fmr.flags;
1373 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1374 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1375 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1376 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1377 e_fmr->acl = save_fmr.acl;
1378
1379 memset(&pginfo, 0, sizeof(pginfo));
1380 pginfo.type = EHCA_MR_PGI_FMR;
1381 pginfo.num_kpages = 0;
1382 pginfo.num_hwpages = 0;
1383 ret = ehca_reg_mr(shca, e_fmr, NULL,
1384 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1385 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1386 &tmp_rkey);
1387 if (ret) {
1388 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1389 memcpy(&e_fmr->flags, &(save_mr.flags),
1390 sizeof(struct ehca_mr) - offset);
1391 goto ehca_unmap_one_fmr_exit0;
1392 }
1393 }
1394
1395 ehca_unmap_one_fmr_exit0:
1396 if (ret)
1397 ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
1398 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
1399 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
1400 rereg_1_hcall, rereg_3_hcall);
1401 return ret;
1402 } /* end ehca_unmap_one_fmr() */
1403
1404 /*----------------------------------------------------------------------*/
1405
1406 int ehca_reg_smr(struct ehca_shca *shca,
1407 struct ehca_mr *e_origmr,
1408 struct ehca_mr *e_newmr,
1409 u64 *iova_start,
1410 int acl,
1411 struct ehca_pd *e_pd,
1412 u32 *lkey, /*OUT*/
1413 u32 *rkey) /*OUT*/
1414 {
1415 int ret = 0;
1416 u64 h_ret;
1417 u32 hipz_acl;
1418 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1419
1420 ehca_mrmw_map_acl(acl, &hipz_acl);
1421 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1422
1423 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1424 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1425 &hipzout);
1426 if (h_ret != H_SUCCESS) {
1427 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1428 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1429 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1430 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1431 shca->ipz_hca_handle.handle,
1432 e_origmr->ipz_mr_handle.handle,
1433 e_origmr->ib.ib_mr.lkey);
1434 ret = ehca2ib_return_code(h_ret);
1435 goto ehca_reg_smr_exit0;
1436 }
1437 /* successful registration */
1438 e_newmr->num_kpages = e_origmr->num_kpages;
1439 e_newmr->num_hwpages = e_origmr->num_hwpages;
1440 e_newmr->start = iova_start;
1441 e_newmr->size = e_origmr->size;
1442 e_newmr->acl = acl;
1443 e_newmr->ipz_mr_handle = hipzout.handle;
1444 *lkey = hipzout.lkey;
1445 *rkey = hipzout.rkey;
1446 return 0;
1447
1448 ehca_reg_smr_exit0:
1449 if (ret)
1450 ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
1451 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1452 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1453 return ret;
1454 } /* end ehca_reg_smr() */
1455
1456 /*----------------------------------------------------------------------*/
1457
1458 /* register internal max-MR to internal SHCA */
1459 int ehca_reg_internal_maxmr(
1460 struct ehca_shca *shca,
1461 struct ehca_pd *e_pd,
1462 struct ehca_mr **e_maxmr) /*OUT*/
1463 {
1464 int ret;
1465 struct ehca_mr *e_mr;
1466 u64 *iova_start;
1467 u64 size_maxmr;
1468 struct ehca_mr_pginfo pginfo;
1469 struct ib_phys_buf ib_pbuf;
1470 u32 num_kpages;
1471 u32 num_hwpages;
1472
1473 e_mr = ehca_mr_new();
1474 if (!e_mr) {
1475 ehca_err(&shca->ib_device, "out of memory");
1476 ret = -ENOMEM;
1477 goto ehca_reg_internal_maxmr_exit0;
1478 }
1479 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1480
1481 /* register internal max-MR on HCA */
1482 size_maxmr = (u64)high_memory - PAGE_OFFSET;
1483 iova_start = (u64*)KERNELBASE;
1484 ib_pbuf.addr = 0;
1485 ib_pbuf.size = size_maxmr;
1486 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
1487 PAGE_SIZE);
1488 num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
1489 EHCA_PAGESIZE);
1490
1491 memset(&pginfo, 0, sizeof(pginfo));
1492 pginfo.type = EHCA_MR_PGI_PHYS;
1493 pginfo.num_kpages = num_kpages;
1494 pginfo.num_hwpages = num_hwpages;
1495 pginfo.u.phy.num_phys_buf = 1;
1496 pginfo.u.phy.phys_buf_array = &ib_pbuf;
1497
1498 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1499 &pginfo, &e_mr->ib.ib_mr.lkey,
1500 &e_mr->ib.ib_mr.rkey);
1501 if (ret) {
1502 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1503 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
1504 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1505 num_kpages, num_hwpages);
1506 goto ehca_reg_internal_maxmr_exit1;
1507 }
1508
1509 /* successful registration of all pages */
1510 e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1511 e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1512 e_mr->ib.ib_mr.uobject = NULL;
1513 atomic_inc(&(e_pd->ib_pd.usecnt));
1514 atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
1515 *e_maxmr = e_mr;
1516 return 0;
1517
1518 ehca_reg_internal_maxmr_exit1:
1519 ehca_mr_delete(e_mr);
1520 ehca_reg_internal_maxmr_exit0:
1521 if (ret)
1522 ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
1523 ret, shca, e_pd, e_maxmr);
1524 return ret;
1525 } /* end ehca_reg_internal_maxmr() */
1526
1527 /*----------------------------------------------------------------------*/
1528
1529 int ehca_reg_maxmr(struct ehca_shca *shca,
1530 struct ehca_mr *e_newmr,
1531 u64 *iova_start,
1532 int acl,
1533 struct ehca_pd *e_pd,
1534 u32 *lkey,
1535 u32 *rkey)
1536 {
1537 u64 h_ret;
1538 struct ehca_mr *e_origmr = shca->maxmr;
1539 u32 hipz_acl;
1540 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1541
1542 ehca_mrmw_map_acl(acl, &hipz_acl);
1543 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1544
1545 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1546 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1547 &hipzout);
1548 if (h_ret != H_SUCCESS) {
1549 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1550 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1551 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1552 e_origmr->ipz_mr_handle.handle,
1553 e_origmr->ib.ib_mr.lkey);
1554 return ehca2ib_return_code(h_ret);
1555 }
1556 /* successful registration */
1557 e_newmr->num_kpages = e_origmr->num_kpages;
1558 e_newmr->num_hwpages = e_origmr->num_hwpages;
1559 e_newmr->start = iova_start;
1560 e_newmr->size = e_origmr->size;
1561 e_newmr->acl = acl;
1562 e_newmr->ipz_mr_handle = hipzout.handle;
1563 *lkey = hipzout.lkey;
1564 *rkey = hipzout.rkey;
1565 return 0;
1566 } /* end ehca_reg_maxmr() */
1567
1568 /*----------------------------------------------------------------------*/
1569
1570 int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1571 {
1572 int ret;
1573 struct ehca_mr *e_maxmr;
1574 struct ib_pd *ib_pd;
1575
1576 if (!shca->maxmr) {
1577 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1578 ret = -EINVAL;
1579 goto ehca_dereg_internal_maxmr_exit0;
1580 }
1581
1582 e_maxmr = shca->maxmr;
1583 ib_pd = e_maxmr->ib.ib_mr.pd;
1584 shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1585
1586 ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1587 if (ret) {
1588 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1589 "ret=%x e_maxmr=%p shca=%p lkey=%x",
1590 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1591 shca->maxmr = e_maxmr;
1592 goto ehca_dereg_internal_maxmr_exit0;
1593 }
1594
1595 atomic_dec(&ib_pd->usecnt);
1596
1597 ehca_dereg_internal_maxmr_exit0:
1598 if (ret)
1599 ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
1600 ret, shca, shca->maxmr);
1601 return ret;
1602 } /* end ehca_dereg_internal_maxmr() */
1603
1604 /*----------------------------------------------------------------------*/
1605
1606 /*
1607  * check physical buffer array of MR verbs for validity and
1608  * calculate MR size
1609 */
1610 int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
1611 int num_phys_buf,
1612 u64 *iova_start,
1613 u64 *size)
1614 {
1615 struct ib_phys_buf *pbuf = phys_buf_array;
1616 u64 size_count = 0;
1617 u32 i;
1618
1619 if (num_phys_buf == 0) {
1620 ehca_gen_err("bad phys buf array len, num_phys_buf=0");
1621 return -EINVAL;
1622 }
1623 /* check first buffer */
1624 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
1625 ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
1626 "pbuf->addr=%lx pbuf->size=%lx",
1627 iova_start, pbuf->addr, pbuf->size);
1628 return -EINVAL;
1629 }
1630 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
1631 (num_phys_buf > 1)) {
1632 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
1633 "pbuf->size=%lx", pbuf->addr, pbuf->size);
1634 return -EINVAL;
1635 }
1636
1637 for (i = 0; i < num_phys_buf; i++) {
1638 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
1639 ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
1640 "pbuf->size=%lx",
1641 i, pbuf->addr, pbuf->size);
1642 return -EINVAL;
1643 }
1644 if (((i > 0) && /* not 1st */
1645 (i < (num_phys_buf - 1)) && /* not last */
1646 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
1647 ehca_gen_err("bad size, i=%x pbuf->size=%lx",
1648 i, pbuf->size);
1649 return -EINVAL;
1650 }
1651 size_count += pbuf->size;
1652 pbuf++;
1653 }
1654
1655 *size = size_count;
1656 return 0;
1657 } /* end ehca_mr_chk_buf_and_calc_size() */
1658
1659 /*----------------------------------------------------------------------*/
1660
1661 /* check page list of map FMR verb for validity */
1662 int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1663 u64 *page_list,
1664 int list_len)
1665 {
1666 u32 i;
1667 u64 *page;
1668
1669 if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1670 ehca_gen_err("bad list_len, list_len=%x "
1671 "e_fmr->fmr_max_pages=%x fmr=%p",
1672 list_len, e_fmr->fmr_max_pages, e_fmr);
1673 return -EINVAL;
1674 }
1675
1676 /* each page must be aligned */
1677 page = page_list;
1678 for (i = 0; i < list_len; i++) {
1679 if (*page % e_fmr->fmr_page_size) {
1680 ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
1681 "fmr_page_size=%x", i, *page, page, e_fmr,
1682 e_fmr->fmr_page_size);
1683 return -EINVAL;
1684 }
1685 page++;
1686 }
1687
1688 return 0;
1689 } /* end ehca_fmr_check_page_list() */
1690
1691 /*----------------------------------------------------------------------*/
1692
1693 /* setup page buffer from page info */
1694 int ehca_set_pagebuf(struct ehca_mr *e_mr,
1695 struct ehca_mr_pginfo *pginfo,
1696 u32 number,
1697 u64 *kpage)
1698 {
1699 int ret = 0;
1700 struct ib_umem_chunk *prev_chunk;
1701 struct ib_umem_chunk *chunk;
1702 struct ib_phys_buf *pbuf;
1703 u64 *fmrlist;
1704 u64 num_hw, pgaddr, offs_hw;
1705 u32 i = 0;
1706 u32 j = 0;
1707
1708 if (pginfo->type == EHCA_MR_PGI_PHYS) {
1709 /* loop over desired phys_buf_array entries */
1710 while (i < number) {
1711 pbuf = pginfo->u.phy.phys_buf_array
1712 + pginfo->u.phy.next_buf;
1713 num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
1714 pbuf->size, EHCA_PAGESIZE);
1715 offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1716 while (pginfo->next_hwpage < offs_hw + num_hw) {
1717 /* sanity check */
1718 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1719 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1720 ehca_gen_err("kpage_cnt >= num_kpages, "
1721 "kpage_cnt=%lx "
1722 "num_kpages=%lx "
1723 "hwpage_cnt=%lx "
1724 "num_hwpages=%lx i=%x",
1725 pginfo->kpage_cnt,
1726 pginfo->num_kpages,
1727 pginfo->hwpage_cnt,
1728 pginfo->num_hwpages, i);
1729 ret = -EFAULT;
1730 goto ehca_set_pagebuf_exit0;
1731 }
1732 *kpage = phys_to_abs(
1733 (pbuf->addr & EHCA_PAGEMASK)
1734 + (pginfo->next_hwpage * EHCA_PAGESIZE));
1735 if ( !(*kpage) && pbuf->addr ) {
1736 ehca_gen_err("pbuf->addr=%lx "
1737 "pbuf->size=%lx "
1738 "next_hwpage=%lx", pbuf->addr,
1739 pbuf->size,
1740 pginfo->next_hwpage);
1741 ret = -EFAULT;
1742 goto ehca_set_pagebuf_exit0;
1743 }
1744 (pginfo->hwpage_cnt)++;
1745 (pginfo->next_hwpage)++;
1746 if (pginfo->next_hwpage %
1747 (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1748 (pginfo->kpage_cnt)++;
1749 kpage++;
1750 i++;
1751 if (i >= number) break;
1752 }
1753 if (pginfo->next_hwpage >= offs_hw + num_hw) {
1754 (pginfo->u.phy.next_buf)++;
1755 pginfo->next_hwpage = 0;
1756 }
1757 }
1758 } else if (pginfo->type == EHCA_MR_PGI_USER) {
1759 /* loop over desired chunk entries */
1760 chunk = pginfo->u.usr.next_chunk;
1761 prev_chunk = pginfo->u.usr.next_chunk;
1762 list_for_each_entry_continue(chunk,
1763 (&(pginfo->u.usr.region->chunk_list)),
1764 list) {
1765 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1766 pgaddr = ( page_to_pfn(chunk->page_list[i].page)
1767 << PAGE_SHIFT );
1768 *kpage = phys_to_abs(pgaddr +
1769 (pginfo->next_hwpage *
1770 EHCA_PAGESIZE));
1771 if ( !(*kpage) ) {
1772 ehca_gen_err("pgaddr=%lx "
1773 "chunk->page_list[i]=%lx "
1774 "i=%x next_hwpage=%lx mr=%p",
1775 pgaddr,
1776 (u64)sg_dma_address(
1777 &chunk->
1778 page_list[i]),
1779 i, pginfo->next_hwpage, e_mr);
1780 ret = -EFAULT;
1781 goto ehca_set_pagebuf_exit0;
1782 }
1783 (pginfo->hwpage_cnt)++;
1784 (pginfo->next_hwpage)++;
1785 kpage++;
1786 if (pginfo->next_hwpage %
1787 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1788 (pginfo->kpage_cnt)++;
1789 (pginfo->u.usr.next_nmap)++;
1790 pginfo->next_hwpage = 0;
1791 i++;
1792 }
1793 j++;
1794 if (j >= number) break;
1795 }
1796 if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
1797 (j >= number)) {
1798 pginfo->u.usr.next_nmap = 0;
1799 prev_chunk = chunk;
1800 break;
1801 } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
1802 pginfo->u.usr.next_nmap = 0;
1803 prev_chunk = chunk;
1804 } else if (j >= number)
1805 break;
1806 else
1807 prev_chunk = chunk;
1808 }
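/*
 * save the resume point: the next call's list_for_each_entry_continue()
 * starts with the entry after prev_chunk, so a partly consumed chunk is
 * revisited while a fully consumed one is skipped
 */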
1809 pginfo->u.usr.next_chunk =
1810 list_prepare_entry(prev_chunk,
1811 (&(pginfo->u.usr.region->chunk_list)),
1812 list);
1813 } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1814 /* loop over desired page_list entries */
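/*
 * each page_list entry is an fmr_page_size aligned address (checked in
 * ehca_fmr_check_page_list() above); since fmr_page_size may exceed
 * EHCA_PAGESIZE, one list element expands into fmr_page_size/EHCA_PAGESIZE
 * hw pages before next_listelem advances
 */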
1815 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1816 for (i = 0; i < number; i++) {
1817 *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1818 pginfo->next_hwpage * EHCA_PAGESIZE);
1819 if (!(*kpage)) {
1820 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1821 "next_listelem=%lx next_hwpage=%lx",
1822 *fmrlist, fmrlist,
1823 pginfo->u.fmr.next_listelem,
1824 pginfo->next_hwpage);
1825 ret = -EFAULT;
1826 goto ehca_set_pagebuf_exit0;
1827 }
1828 (pginfo->hwpage_cnt)++;
1829 (pginfo->next_hwpage)++;
1830 kpage++;
1831 if (pginfo->next_hwpage %
1832 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1833 (pginfo->kpage_cnt)++;
1834 (pginfo->u.fmr.next_listelem)++;
1835 fmrlist++;
1836 pginfo->next_hwpage = 0;
1837 }
1838 }
1839 } else {
1840 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1841 ret = -EFAULT;
1842 goto ehca_set_pagebuf_exit0;
1843 }
1844
1845 ehca_set_pagebuf_exit0:
1846 if (ret)
1847 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx "
1848 "num_hwpages=%lx next_buf=%lx next_hwpage=%lx number=%x "
1849 "kpage=%p kpage_cnt=%lx hwpage_cnt=%lx i=%x "
1850 "next_listelem=%lx region=%p next_chunk=%p "
1851 "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
1852 pginfo->num_kpages, pginfo->num_hwpages,
1853 pginfo->u.phy.next_buf, pginfo->next_hwpage, number, kpage,
1854 pginfo->kpage_cnt, pginfo->hwpage_cnt, i,
1855 pginfo->u.fmr.next_listelem, pginfo->u.usr.region,
1856 pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap);
1857 return ret;
1858 } /* end ehca_set_pagebuf() */
1859
1860 /*----------------------------------------------------------------------*/
1861
1862 /* setup 1 page from page info page buffer */
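/*
 * single-page variant of ehca_set_pagebuf() above: emits exactly one hw
 * page address into *rpage and advances the same pginfo cursor, apparently
 * for hcalls that register a single rpage at a time
 */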
1863 int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
1864 struct ehca_mr_pginfo *pginfo,
1865 u64 *rpage)
1866 {
1867 int ret = 0;
1868 struct ib_phys_buf *tmp_pbuf;
1869 u64 *fmrlist;
1870 struct ib_umem_chunk *chunk;
1871 struct ib_umem_chunk *prev_chunk;
1872 u64 pgaddr, num_hw, offs_hw;
1873
1874 if (pginfo->type == EHCA_MR_PGI_PHYS) {
1875 /* sanity check */
1876 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1877 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1878 ehca_gen_err("kpage_cnt >= num_hwpages, kpage_cnt=%lx "
1879 "num_hwpages=%lx hwpage_cnt=%lx num_hwpages=%lx",
1880 pginfo->kpage_cnt, pginfo->num_kpages,
1881 pginfo->hwpage_cnt, pginfo->num_hwpages);
1882 ret = -EFAULT;
1883 goto ehca_set_pagebuf_1_exit0;
1884 }
1885 tmp_pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
1886 num_hw = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) +
1887 tmp_pbuf->size, EHCA_PAGESIZE);
1888 offs_hw = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1889 *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
1890 (pginfo->next_hwpage * EHCA_PAGESIZE));
1891 if (!(*rpage) && tmp_pbuf->addr) {
1892 ehca_gen_err("tmp_pbuf->addr=%lx"
1893 " tmp_pbuf->size=%lx next_hwpage=%lx",
1894 tmp_pbuf->addr, tmp_pbuf->size,
1895 pginfo->next_hwpage);
1896 ret = -EFAULT;
1897 goto ehca_set_pagebuf_1_exit0;
1898 }
1899 (pginfo->hwpage_cnt)++;
1900 (pginfo->next_hwpage)++;
1901 if (pginfo->next_hwpage % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1902 (pginfo->kpage_cnt)++;
1903 if (pginfo->next_hwpage >= offs_hw + num_hw) {
1904 (pginfo->u.phy.next_buf)++;
1905 pginfo->next_hwpage = 0;
1906 }
1907 } else if (pginfo->type == EHCA_MR_PGI_USER) {
1908 chunk = pginfo->u.usr.next_chunk;
1909 prev_chunk = pginfo->u.usr.next_chunk;
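/*
 * the loop below takes a single page from the current chunk and breaks
 * unconditionally; list_for_each_entry_continue() is only used to step to
 * the chunk saved by the previous call
 */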
1910 list_for_each_entry_continue(chunk,
1911 (&(pginfo->u.usr.region->chunk_list)),
1912 list) {
1913 pgaddr = (page_to_pfn(chunk->page_list[
1914 pginfo->u.usr.next_nmap].page)
1915 << PAGE_SHIFT);
1916 *rpage = phys_to_abs(pgaddr +
1917 (pginfo->next_hwpage * EHCA_PAGESIZE));
1918 if (!(*rpage)) {
1919 ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
1920 " next_nmap=%lx next_hwpage=%lx mr=%p",
1921 pgaddr, (u64)sg_dma_address(
1922 &chunk->page_list[
1923 pginfo->u.usr.
1924 next_nmap]),
1925 pginfo->u.usr.next_nmap, pginfo->next_hwpage,
1926 e_mr);
1927 ret = -EFAULT;
1928 goto ehca_set_pagebuf_1_exit0;
1929 }
1930 (pginfo->hwpage_cnt)++;
1931 (pginfo->next_hwpage)++;
1932 if (pginfo->next_hwpage %
1933 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1934 (pginfo->kpage_cnt)++;
1935 (pginfo->u.usr.next_nmap)++;
1936 pginfo->next_hwpage = 0;
1937 }
1938 if (pginfo->u.usr.next_nmap >= chunk->nmap) {
1939 pginfo->u.usr.next_nmap = 0;
1940 prev_chunk = chunk;
1941 }
1942 break;
1943 }
1944 pginfo->u.usr.next_chunk =
1945 list_prepare_entry(prev_chunk,
1946 (&(pginfo->u.usr.region->chunk_list)),
1947 list);
1948 } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1949 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1950 *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1951 pginfo->next_hwpage * EHCA_PAGESIZE);
1952 if (!(*rpage)) {
1953 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1954 "next_listelem=%lx next_hwpage=%lx",
1955 *fmrlist, fmrlist, pginfo->u.fmr.next_listelem,
1956 pginfo->next_hwpage);
1957 ret = -EFAULT;
1958 goto ehca_set_pagebuf_1_exit0;
1959 }
1960 (pginfo->hwpage_cnt)++;
1961 (pginfo->next_hwpage)++;
1962 if (pginfo->next_hwpage %
1963 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1964 (pginfo->kpage_cnt)++;
1965 (pginfo->u.fmr.next_listelem)++;
1966 pginfo->next_hwpage = 0;
1967 }
1968 } else {
1969 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1970 ret = -EFAULT;
1971 goto ehca_set_pagebuf_1_exit0;
1972 }
1973
1974 ehca_set_pagebuf_1_exit0:
1975 if (ret)
1976 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx "
1977 "num_hwpages=%lx next_buf=%lx next_hwpage=%lx rpage=%p "
1978 "kpage_cnt=%lx hwpage_cnt=%lx next_listelem=%lx "
1979 "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
1980 pginfo, pginfo->type, pginfo->num_kpages,
1981 pginfo->num_hwpages, pginfo->u.phy.next_buf, pginfo->next_hwpage,
1982 rpage, pginfo->kpage_cnt, pginfo->hwpage_cnt,
1983 pginfo->u.fmr.next_listelem, pginfo->u.usr.region,
1984 pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap);
1985 return ret;
1986 } /* end ehca_set_pagebuf_1() */
1987
1988 /*----------------------------------------------------------------------*/
1989
1990 /*
1991 * check whether an MR is a max-MR, i.e. one that covers all of kernel memory;
1992 * returns 1 if it is a max-MR, 0 otherwise
1993 */
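/*
 * the max-MR spans the kernel's linearly mapped memory: its size equals
 * high_memory - PAGE_OFFSET and its iova_start is KERNELBASE (on powerpc,
 * the kernel's virtual base address); both conditions must match exactly
 */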
1994 int ehca_mr_is_maxmr(u64 size,
1995 u64 *iova_start)
1996 {
1997 /* an MR is treated as a max-MR only if it meets the following conditions: */
1998 if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
1999 (iova_start == (void*)KERNELBASE)) {
2000 ehca_gen_dbg("this is a max-MR");
2001 return 1;
2002 } else
2003 return 0;
2004 } /* end ehca_mr_is_maxmr() */
2005
2006 /*----------------------------------------------------------------------*/
2007
2008 /* map IB access control flags to hipz access control; used for both MR and MW */
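/*
 * e.g. ib_acl = IB_ACCESS_REMOTE_READ | IB_ACCESS_LOCAL_WRITE yields
 * *hipz_acl = HIPZ_ACCESSCTRL_R_READ | HIPZ_ACCESSCTRL_L_WRITE
 */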
2009 void ehca_mrmw_map_acl(int ib_acl,
2010 u32 *hipz_acl)
2011 {
2012 *hipz_acl = 0;
2013 if (ib_acl & IB_ACCESS_REMOTE_READ)
2014 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
2015 if (ib_acl & IB_ACCESS_REMOTE_WRITE)
2016 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
2017 if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
2018 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
2019 if (ib_acl & IB_ACCESS_LOCAL_WRITE)
2020 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
2021 if (ib_acl & IB_ACCESS_MW_BIND)
2022 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
2023 } /* end ehca_mrmw_map_acl() */
2024
2025 /*----------------------------------------------------------------------*/
2026
2027 /* sets page size in hipz access control for MR/MW. */
2028 void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
2029 {
2030 return; /* HCA supports only 4k */
2031 } /* end ehca_mrmw_set_pgsize_hipz_acl() */
2032
2033 /*----------------------------------------------------------------------*/
2034
2035 /*
2036 * reverse map hipz access control back to IB access control flags;
2037 * used for both MR and MW
2038 */
2039 void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
2040 int *ib_acl) /*OUT*/
2041 {
2042 *ib_acl = 0;
2043 if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
2044 *ib_acl |= IB_ACCESS_REMOTE_READ;
2045 if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
2046 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
2047 if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
2048 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
2049 if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
2050 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
2051 if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
2052 *ib_acl |= IB_ACCESS_MW_BIND;
2053 } /* end ehca_mrmw_reverse_map_acl() */
2054
2055
2056 /*----------------------------------------------------------------------*/
2057
2058 /*
2059 * MR destructor and constructor in one:
2060 * used by the reregister MR verb; resets all fields of struct ehca_mr to 0,
2061 * except the embedded struct ib_mr and the spinlock
2062 */
2063 void ehca_mr_deletenew(struct ehca_mr *mr)
2064 {
2065 mr->flags = 0;
2066 mr->num_kpages = 0;
2067 mr->num_hwpages = 0;
2068 mr->acl = 0;
2069 mr->start = NULL;
2070 mr->fmr_page_size = 0;
2071 mr->fmr_max_pages = 0;
2072 mr->fmr_max_maps = 0;
2073 mr->fmr_map_cnt = 0;
2074 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
2075 memset(&mr->galpas, 0, sizeof(mr->galpas));
2076 } /* end ehca_mr_deletenew() */
2077
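/*
 * create the slab caches backing ehca_mr_new()/ehca_mw_new(); presumably
 * called once at module load, with ehca_cleanup_mrmw_cache() below as the
 * counterpart on unload
 */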
2078 int ehca_init_mrmw_cache(void)
2079 {
2080 mr_cache = kmem_cache_create("ehca_cache_mr",
2081 sizeof(struct ehca_mr), 0,
2082 SLAB_HWCACHE_ALIGN,
2083 NULL, NULL);
2084 if (!mr_cache)
2085 return -ENOMEM;
2086 mw_cache = kmem_cache_create("ehca_cache_mw",
2087 sizeof(struct ehca_mw), 0,
2088 SLAB_HWCACHE_ALIGN,
2089 NULL, NULL);
2090 if (!mw_cache) {
2091 kmem_cache_destroy(mr_cache);
2092 mr_cache = NULL;
2093 return -ENOMEM;
2094 }
2095 return 0;
2096 }
2097
2098 void ehca_cleanup_mrmw_cache(void)
2099 {
2100 if (mr_cache)
2101 kmem_cache_destroy(mr_cache);
2102 if (mw_cache)
2103 kmem_cache_destroy(mw_cache);
2104 }