// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
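
/* In short: an unused MR is INVALID. frwr_map posts a FAST_REG WR and
 * marks the MR VALID; frwr_unmap_sync posts a LOCAL_INV WR to return it
 * to INVALID. A WR flushed by a disconnect leaves the MR FLUSHED_FR or
 * FLUSHED_LI until frwr_map recovers it, as described above.
 */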

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_is_supported - Check if device supports FRWR
 * @ia: interface adapter to check
 *
 * Returns true if device supports FRWR, otherwise false
 */
bool frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);
	frwr_release_mr(mr);
}

/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->frwr.fr_state = FRWR_IS_INVALID;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 * @cdata: transport parameters
 *
 * On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * On failure, a negative errno is returned.
 */
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	      struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC:       %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}
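
	/* Worked example (illustrative numbers): if RPCRDMA_MAX_DATA_SEGS
	 * were 64 and the device depth 16, delta starts at 48 and the loop
	 * above runs three times, so depth becomes 7 + 3 * 2 = 13 WRs
	 * per RPC.
	 */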

	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		cdata->max_requests = max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}

/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
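
	/* Illustrative example: with ri_max_segs = 6 and an FRWR depth of
	 * 16 pages, the result below is min(RPCRDMA_MAX_DATA_SEGS,
	 * (6 - 2) * 16) = min(RPCRDMA_MAX_DATA_SEGS, 64) pages.
	 */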
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @out: initialized MR
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, the prepared MR is planted in @out.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, u32 xid,
				struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_recycle(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
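
	/* The loop above keeps drawing MRs until one in the INVALID state
	 * is found; MRs left VALID or FLUSHED by a transport disconnect
	 * are handed to rpcrdma_mr_recycle (see Transport recovery above).
	 */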
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;
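
	/* Placing the RPC XID in the upper 32 bits of the iova (and thus
	 * of the advertised RDMA offset) appears to be a debugging aid:
	 * it lets a segment seen on the wire or in traces be matched to
	 * the RPC that registered it.
	 */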
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)cpu_to_be32(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	*out = mr;
	return seg;

out_dmamap_err:
	frwr->fr_state = FRWR_IS_INVALID;
	trace_xprtrdma_frwr_sgerr(mr, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	rpcrdma_mr_recycle(mr);
	return ERR_PTR(-EIO);
}

/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport
 * @mrs: list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_mr_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;
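
		/* Link this LOCAL_INV WR onto the chain: prev points at the
		 * previous WR's next field (or at "first" for the first MR
		 * on the list), so the WRs form one chain headed by first.
		 */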
		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

out_release:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Unmap and release the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		rpcrdma_mr_recycle(mr);
	}
}