// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

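/* fmr_is_supported - check whether the underlying device provides FMRs
 *
 * A device advertises FMR support by populating its ->alloc_fmr
 * method; when that method is NULL, this registration mode cannot
 * be used with the device.
 */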
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}

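/* fmr_op_init_mr - one-time set-up of a single rpcrdma_mw
 *
 * Allocates the physical address array and scatterlist consumed by
 * fmr_op_map(), then allocates the ib_fmr itself. Everything is
 * sized for the maximum of RPCRDMA_MAX_FMR_SGES pages.
 */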
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

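	/* Note: max_maps is 1, so each FMR must be unmapped before it
	 * can be mapped again; fmr_op_map() and fmr_op_unmap_sync()
	 * below preserve that map/unmap pairing.
	 */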
	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}

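/* __fmr_unmap - invalidate a single FMR
 *
 * ib_unmap_fmr() operates on a list of FMRs, so the MR is placed
 * on a temporary local list just for the duration of the verb.
 */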
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mw->fmr.fm_mr->list);
	return rc;
}

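/* fmr_op_release_mr - free one rpcrdma_mw and all of its resources
 */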
static void
fmr_op_release_mr(struct rpcrdma_mw *r)
{
	LIST_HEAD(unmap_list);
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(r);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       r, rc);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);

	kfree(r);
}

/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mw);

	/* ORDER: then DMA unmap */
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
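	/* The DMA unmap above runs even when invalidation failed:
	 * the pages must be returned to CPU ownership either way.
	 * An FMR that could not be invalidated is not safe to reuse,
	 * so it is released below instead of being recycled.
	 */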
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	fmr_op_release_mr(mw);
}

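/* fmr_op_open - set the transport's segment limit
 *
 * Derives how many chunk segments an RPC may need from the largest
 * supported payload and the per-FMR page limit.
 */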
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				RPCRDMA_MAX_FMR_SGES);
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;
	u64 *dma_pages;

	mw = rpcrdma_get_mw(r_xprt);
	if (!mw)
		return ERR_PTR(-ENOBUFS);

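	/* ib_map_phys_fmr() registers whole pages, so pull the first
	 * segment back to the start of its page and account for the
	 * byte offset separately (see mw->mw_offset below).
	 */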
	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
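	/* Gather up to nsegs segments into the scatterlist. Gathering
	 * stops early at a "hole": an FMR can only describe a single
	 * contiguous, page-aligned extent.
	 */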
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_dir = rpcrdma_data_dir(writing);

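	/* DMA map the gathered pages for device access. The mapping
	 * layer may coalesce entries, so mw_nents can be smaller
	 * than the i entries passed in.
	 */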
	mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
				     mw->mw_sg, i, mw->mw_dir);
	if (!mw->mw_nents)
		goto out_dmamap_err;

	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
		dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

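	/* Registration succeeded: record the rkey and the region's
	 * offset and length so they can be advertised to the server
	 * in RPC/RDMA chunks.
	 */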
	mw->mw_handle = mw->fmr.fm_mr->rkey;
	mw->mw_length = len;
	mw->mw_offset = dma_pages[0] + pageoff;

	*out = mw;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mw->mw_sg, i);
	rpcrdma_put_mw(r_xprt, mw);
	return ERR_PTR(-EIO);

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mw->mw_nents, rc);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
	rpcrdma_put_mw(r_xprt, mw);
	return ERR_PTR(-EIO);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mws is not empty before the call. This
 * function empties the list.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mw, mws, mw_list) {
		dprintk("RPC: %s: unmapping fmr %p\n",
			__func__, &mw->fmr);
		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
	}
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		dprintk("RPC: %s: DMA unmapping fmr %p\n",
			__func__, &mw->fmr);
		list_del(&mw->fmr.fm_mr->list);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}

	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

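	/* After a failed unmap, the hardware state of every FMR on
	 * the list is unknown; recover each one individually via the
	 * slow reset path.
	 */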
	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		list_del(&mw->fmr.fm_mr->list);
		fmr_op_recover_mr(mw);
	}
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = rpcrdma_pop_mw(&req->rl_registered);
		if (sync)
			fmr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

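/* The generic transport code invokes FMR registration through this
 * method table; the rpcrdma_memreg_ops type is declared in
 * xprt_rdma.h.
 */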
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_unmap_safe			= fmr_op_unmap_safe,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
	.ro_send_w_inv_ok		= 0,
};