// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
        RDMA_RW_SINGLE_WR,
        RDMA_RW_MULTI_WR,
        RDMA_RW_MR,
        RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
/*
 * Check if the device might use memory registration. This is currently only
 * true for iWarp devices. In the future we can hopefully fine tune this based
 * on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
        if (rdma_protocol_iwarp(dev, port_num))
                return true;
        if (unlikely(rdma_rw_force_mr))
                return true;
        return false;
}
/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
                enum dma_data_direction dir, int dma_nents)
{
        if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
                return true;
        if (unlikely(rdma_rw_force_mr))
                return true;
        return false;
}
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
        /* arbitrary limit to avoid allocating gigantic resources */
        return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}
/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
                struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
                u32 sg_cnt, u32 offset)
{
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
        u32 nents = min(sg_cnt, pages_per_mr);
        int count = 0, ret;

        reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
        if (!reg->mr)
                return -EAGAIN;

        if (reg->mr->need_inval) {
                reg->inv_wr.opcode = IB_WR_LOCAL_INV;
                reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
                reg->inv_wr.next = &reg->reg_wr.wr;
                count++;
        } else {
                reg->inv_wr.next = NULL;
        }

        ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
        if (ret < 0 || ret < nents) {
                ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
                return -EINVAL;
        }

        reg->reg_wr.wr.opcode = IB_WR_REG_MR;
        reg->reg_wr.mr = reg->mr;
        reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
        if (rdma_protocol_iwarp(qp->device, port_num))
                reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
        count++;

        reg->sge.addr = reg->mr->iova;
        reg->sge.length = reg->mr->length;
        return count;
}
static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct rdma_rw_reg_ctx *prev = NULL;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
        int i, j, ret = 0, count = 0;

        ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
        ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
        if (!ctx->reg) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < ctx->nr_ops; i++) {
                struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
                u32 nents = min(sg_cnt, pages_per_mr);

                ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
                                offset);
                if (ret < 0)
                        goto out_free;
                count += ret;

                if (prev) {
                        if (reg->mr->need_inval)
                                prev->wr.wr.next = &reg->inv_wr;
                        else
                                prev->wr.wr.next = &reg->reg_wr.wr;
                }

                reg->reg_wr.wr.next = &reg->wr.wr;

                reg->wr.wr.sg_list = &reg->sge;
                reg->wr.wr.num_sge = 1;
                reg->wr.remote_addr = remote_addr;
                reg->wr.rkey = rkey;
                if (dir == DMA_TO_DEVICE) {
                        reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
                } else if (!rdma_cap_read_inv(qp->device, port_num)) {
                        reg->wr.wr.opcode = IB_WR_RDMA_READ;
                } else {
                        reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
                        reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
                }
                count++;

                remote_addr += reg->sge.length;
                sg_cnt -= nents;
                for (j = 0; j < nents; j++)
                        sg = sg_next(sg);
                prev = reg;
                offset = 0;
        }

        if (prev)
                prev->wr.wr.next = NULL;

        ctx->type = RDMA_RW_MR;
        return count;

out_free:
        while (--i >= 0)
                ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
        kfree(ctx->reg);
out:
        return ret;
}
static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
                      qp->max_read_sge;
        struct ib_sge *sge;
        u32 total_len = 0, i, j;

        ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

        ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
        if (!ctx->map.sges)
                goto out;

        ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
        if (!ctx->map.wrs)
                goto out_free_sges;

        for (i = 0; i < ctx->nr_ops; i++) {
                struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
                u32 nr_sge = min(sg_cnt, max_sge);

                if (dir == DMA_TO_DEVICE)
                        rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
                else
                        rdma_wr->wr.opcode = IB_WR_RDMA_READ;
                rdma_wr->remote_addr = remote_addr + total_len;
                rdma_wr->rkey = rkey;
                rdma_wr->wr.num_sge = nr_sge;
                rdma_wr->wr.sg_list = sge;

                for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
                        sge->addr = sg_dma_address(sg) + offset;
                        sge->length = sg_dma_len(sg) - offset;
                        sge->lkey = qp->pd->local_dma_lkey;

                        total_len += sge->length;
                        sge++;
                        sg_cnt--;
                        offset = 0;
                }

                rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
                        &ctx->map.wrs[i + 1].wr : NULL;
        }

        ctx->type = RDMA_RW_MULTI_WR;
        return ctx->nr_ops;

out_free_sges:
        kfree(ctx->map.sges);
out:
        return -ENOMEM;
}
static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
                enum dma_data_direction dir)
{
        struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

        ctx->nr_ops = 1;

        ctx->single.sge.lkey = qp->pd->local_dma_lkey;
        ctx->single.sge.addr = sg_dma_address(sg) + offset;
        ctx->single.sge.length = sg_dma_len(sg) - offset;

        memset(rdma_wr, 0, sizeof(*rdma_wr));
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
        else
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        rdma_wr->wr.sg_list = &ctx->single.sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;

        ctx->type = RDMA_RW_SINGLE_WR;
        return 1;
}
/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        int ret;

        if (is_pci_p2pdma_page(sg_page(sg)))
                ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
        else
                ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);

        if (!ret)
                return -ENOMEM;
        sg_cnt = ret;

        /*
         * Skip to the S/G entry that sg_offset falls into:
         */
        for (;;) {
                u32 len = sg_dma_len(sg);

                if (sg_offset < len)
                        break;

                sg = sg_next(sg);
                sg_offset -= len;
                sg_cnt--;
        }

        ret = -EIO;
        if (WARN_ON_ONCE(sg_cnt == 0))
                goto out_unmap_sg;

        if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
                ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
                                sg_offset, remote_addr, rkey, dir);
        } else if (sg_cnt > 1) {
                ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
                                remote_addr, rkey, dir);
        } else {
                ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
                                remote_addr, rkey, dir);
        }

        if (ret < 0)
                goto out_unmap_sg;
        return ret;

out_unmap_sg:
        ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
        return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                struct ib_sig_attrs *sig_attrs,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
        struct ib_rdma_wr *rdma_wr;
        struct ib_send_wr *prev_wr = NULL;
        int count = 0, ret;

        if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
                pr_err("SG count too large\n");
                return -EINVAL;
        }

        ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
        if (!ret)
                return -ENOMEM;
        sg_cnt = ret;

        ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
        if (!ret) {
                ret = -ENOMEM;
                goto out_unmap_sg;
        }
        prot_sg_cnt = ret;

        ctx->type = RDMA_RW_SIG_MR;
        ctx->nr_ops = 1;
        ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
        if (!ctx->sig) {
                ret = -ENOMEM;
                goto out_unmap_prot_sg;
        }

        ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
        if (ret < 0)
                goto out_free_ctx;
        count += ret;
        prev_wr = &ctx->sig->data.reg_wr.wr;

        ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
                        prot_sg, prot_sg_cnt, 0);
        if (ret < 0)
                goto out_destroy_data_mr;
        count += ret;

        if (ctx->sig->prot.inv_wr.next)
                prev_wr->next = &ctx->sig->prot.inv_wr;
        else
                prev_wr->next = &ctx->sig->prot.reg_wr.wr;
        prev_wr = &ctx->sig->prot.reg_wr.wr;

        ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
        if (!ctx->sig->sig_mr) {
                ret = -EAGAIN;
                goto out_destroy_prot_mr;
        }

        if (ctx->sig->sig_mr->need_inval) {
                memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));

                ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
                ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;

                prev_wr->next = &ctx->sig->sig_inv_wr;
                prev_wr = &ctx->sig->sig_inv_wr;
        }

        ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
        ctx->sig->sig_wr.wr.wr_cqe = NULL;
        ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
        ctx->sig->sig_wr.wr.num_sge = 1;
        ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
        ctx->sig->sig_wr.sig_attrs = sig_attrs;
        ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
        ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
        prev_wr->next = &ctx->sig->sig_wr.wr;
        prev_wr = &ctx->sig->sig_wr.wr;
        count++;

        ctx->sig->sig_sge.addr = 0;
        ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
        if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
                ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;

        rdma_wr = &ctx->sig->data.wr;
        rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
        else
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        prev_wr->next = &rdma_wr->wr;
        prev_wr = &rdma_wr->wr;
        count++;

        return count;

out_destroy_prot_mr:
        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
out_destroy_data_mr:
        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
out_free_ctx:
        kfree(ctx->sig);
out_unmap_prot_sg:
        ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
        ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
        return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs. If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
        reg->mr->need_inval = need_inval;
        ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
        reg->reg_wr.key = reg->mr->lkey;
        reg->sge.lkey = reg->mr->lkey;
}
/**
 * rdma_rw_ctx_wrs - return chain of WRs for a RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed. If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
        struct ib_send_wr *first_wr, *last_wr;
        int i;

        switch (ctx->type) {
        case RDMA_RW_SIG_MR:
                rdma_rw_update_lkey(&ctx->sig->data, true);
                if (ctx->sig->prot.mr)
                        rdma_rw_update_lkey(&ctx->sig->prot, true);

                ctx->sig->sig_mr->need_inval = true;
                ib_update_fast_reg_key(ctx->sig->sig_mr,
                                       ib_inc_rkey(ctx->sig->sig_mr->lkey));
                ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;

                if (ctx->sig->data.inv_wr.next)
                        first_wr = &ctx->sig->data.inv_wr;
                else
                        first_wr = &ctx->sig->data.reg_wr.wr;
                last_wr = &ctx->sig->data.wr.wr;
                break;
        case RDMA_RW_MR:
                for (i = 0; i < ctx->nr_ops; i++) {
                        rdma_rw_update_lkey(&ctx->reg[i],
                                ctx->reg[i].wr.wr.opcode !=
                                        IB_WR_RDMA_READ_WITH_INV);
                }

                if (ctx->reg[0].inv_wr.next)
                        first_wr = &ctx->reg[0].inv_wr;
                else
                        first_wr = &ctx->reg[0].reg_wr.wr;
                last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
                break;
        case RDMA_RW_MULTI_WR:
                first_wr = &ctx->map.wrs[0].wr;
                last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
                break;
        case RDMA_RW_SINGLE_WR:
                first_wr = &ctx->single.wr.wr;
                last_wr = &ctx->single.wr.wr;
                break;
        default:
                BUG();
        }

        if (chain_wr) {
                last_wr->next = chain_wr;
        } else {
                last_wr->wr_cqe = cqe;
                last_wr->send_flags |= IB_SEND_SIGNALED;
        }

        return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
/**
 * rdma_rw_ctx_post - post a RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed. If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted. If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
        struct ib_send_wr *first_wr;

        first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
        return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);
/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
        int i;

        switch (ctx->type) {
        case RDMA_RW_MR:
                for (i = 0; i < ctx->nr_ops; i++)
                        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
                kfree(ctx->reg);
                break;
        case RDMA_RW_MULTI_WR:
                kfree(ctx->map.wrs);
                kfree(ctx->map.sges);
                break;
        case RDMA_RW_SINGLE_WR:
                break;
        default:
                BUG();
                break;
        }

        /* P2PDMA contexts do not need to be unmapped */
        if (!is_pci_p2pdma_page(sg_page(sg)))
                ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                enum dma_data_direction dir)
{
        if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
                return;

        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
        ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);

        if (ctx->sig->prot.mr) {
                ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
                ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
        }

        ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
        kfree(ctx->sig);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move a payload of
 * @maxpages pages. The returned value is used during transport creation to
 * compute max_rdma_ctxts and the size of the transport's Send and
 * Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
                               unsigned int maxpages)
{
        unsigned int mr_pages;

        if (rdma_rw_can_use_mr(device, port_num))
                mr_pages = rdma_rw_fr_page_list_len(device);
        else
                mr_pages = device->attrs.max_sge_rd;
        return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
        u32 factor;

        WARN_ON_ONCE(attr->port_num == 0);

        /*
         * Each context needs at least one RDMA READ or WRITE WR.
         *
         * For some hardware we might need more, eventually we should ask the
         * HCA driver for a multiplier here.
         */
        factor = 1;

        /*
         * If the device needs MRs to perform RDMA READ or WRITE operations,
         * we'll need two additional WRs per context for the registration and
         * the invalidation.
         */
        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
                factor += 6;	/* (inv + reg) * (data + prot + sig) */
        else if (rdma_rw_can_use_mr(dev, attr->port_num))
                factor += 2;	/* inv + reg */

        attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

        /*
         * But maybe we were just too high in the sky and the device doesn't
         * even support all we need, and we'll have to live with what we get..
         */
        attr->cap.max_send_wr =
                min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
        struct ib_device *dev = qp->pd->device;
        u32 nr_mrs = 0, nr_sig_mrs = 0;
        int ret = 0;

        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
                nr_sig_mrs = attr->cap.max_rdma_ctxs;
                nr_mrs = attr->cap.max_rdma_ctxs * 2;
        } else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
                nr_mrs = attr->cap.max_rdma_ctxs;
        }

        if (nr_mrs) {
                ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
                                IB_MR_TYPE_MEM_REG,
                                rdma_rw_fr_page_list_len(dev));
                if (ret) {
                        pr_err("%s: failed to allocate %d MRs\n",
                                __func__, nr_mrs);
                        return ret;
                }
        }

        if (nr_sig_mrs) {
                ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
                                IB_MR_TYPE_SIGNATURE, 2);
                if (ret) {
                        pr_err("%s: failed to allocate %d SIG MRs\n",
                                __func__, nr_sig_mrs);
                        goto out_free_rdma_mrs;
                }
        }

        return 0;

out_free_rdma_mrs:
        ib_mr_pool_destroy(qp, &qp->rdma_mrs);
        return ret;
}
*qp
)
740 ib_mr_pool_destroy(qp
, &qp
->sig_mrs
);
741 ib_mr_pool_destroy(qp
, &qp
->rdma_mrs
);