/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where the interface
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * sizeof(struct rpcrdma_read_chunk);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}
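
/* Worked example (illustrative only; byte counts assume the usual
 * on-the-wire XDR sizes: RPCRDMA_HDRLEN_MIN is 7 XDR words (28
 * bytes), a struct rpcrdma_segment is 16 bytes, and a struct
 * rpcrdma_read_chunk is 24 bytes):
 *
 *	maxsegs = 8:  size = 28 + (8 + 2) * 24 + 4 + 16 + 4 = 292 bytes
 */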

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}
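
/* Illustrative arithmetic, with the same assumed XDR sizes as above:
 *
 *	maxsegs = 8:  size = 28 + 4 + (8 + 2) * 16 + 4 = 196 bytes
 */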

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = 0;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
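
/* SGE-count illustration for the loop above (hypothetical values,
 * 4KB pages): an xdr_buf whose page_base falls 3000 bytes into the
 * first page, with page_len = 9000, covers 1096 bytes in the first
 * page, 4096 in the second, and 3808 in the third, so the payload
 * consumes three Send SGEs in addition to those for the head and
 * tail iovecs.
 */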

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds the
 * transport's inline threshold, the client must provide a Write list
 * or a Reply chunk for this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < RPCRDMA_MAX_SEGS) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}
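
/* Example split (hypothetical, 4KB pages): a kvec of 10000 bytes
 * whose iov_base begins 100 bytes into a page is converted into
 * segments of 3996, 4096, and 1908 bytes -- one per page touched.
 */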

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	int len, n, p, page_base;
	struct page **ppages;

	n = 0;
	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	p = 0;
	while (len && n < RPCRDMA_MAX_SEGS) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -EAGAIN;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			goto out_overflow;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == RPCRDMA_MAX_SEGS)
		goto out_overflow;

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	return n;

out_overflow:
	pr_err("rpcrdma: segment array overflow\n");
	return -EIO;
}

static inline __be32 *
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	return xdr_encode_hyper(iptr, mw->mw_offset);
}

/* XDR-encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Read list, or an error pointer.
 */
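/* For illustration, a Read list carrying one two-segment chunk at
 * position P is laid out on the wire as (hypothetical values):
 *
 *    1, P, H1, L1, O1,  1, P, H2, L2, O2,  0
 *
 * The trailing 0 terminates the list; both entries carry the same
 * position because they belong to the same argument.
 */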
static __be32 *
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
			 __be32 *iptr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int n, nsegs;

	if (rtype == rpcrdma_noch) {
		*iptr++ = xdr_zero;	/* item not present */
		return iptr;
	}

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 false, &mw);
		if (n < 0)
			return ERR_PTR(n);
		rpcrdma_push_mw(mw, &req->rl_registered);

		*iptr++ = xdr_one;	/* item present */

		/* All read segments in this chunk
		 * have the same "position".
		 */
		*iptr++ = cpu_to_be32(pos);
		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Finish Read list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}

/* XDR-encode the Write list. Supports encoding a list containing
 * one array of plain segments that belong to a single write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Write list, or an error pointer.
 */
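/* For illustration (hypothetical values): a Write list holding one
 * chunk of two segments is encoded as
 *
 *    1, 2, H1, L1, O1, H2, L2, O2, 0
 *
 * Unlike the Read list, the segment count (2) is explicit because
 * the chunk is a counted array, and a single 0 closes the whole
 * list instead of each entry carrying its own discriminator.
 */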
static __be32 *
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, __be32 *iptr,
			  enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech) {
		*iptr++ = xdr_zero;	/* no Write list present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Write list present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		rpcrdma_push_mw(mw, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	/* Finish Write list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}

/* XDR-encode the Reply chunk. Supports encoding an array of plain
 * segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Reply chunk, or an error pointer.
 */
static __be32 *
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
			   __be32 *iptr, enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		*iptr++ = xdr_zero;	/* no Reply chunk present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Reply chunk present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		rpcrdma_push_mw(mw, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return iptr;
}

/* Prepare the RPC-over-RDMA header SGE.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_send_sge[0];

	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
		if (!__rpcrdma_dma_map_regbuf(ia, rb))
			return false;
		sge->addr = rdmab_addr(rb);
		sge->lkey = rdmab_lkey(rb);
	}
	sge->length = len;

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	req->rl_send_wr.num_sge++;
	return true;
}

/* Prepare the Send SGEs. The head and tail iovecs, and each entry
 * in the page list, each get their own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = req->rl_send_sge;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		return false;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			req->rl_mapped_sges++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		req->rl_mapped_sges++;
	}

out:
	req->rl_send_wr.num_sge = sge_no + 1;
	return true;

out_mapping_overflow:
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}

bool
rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			  u32 hdrlen, struct xdr_buf *xdr,
			  enum rpcrdma_chunktype rtype)
{
	req->rl_send_wr.num_sge = 0;
	req->rl_mapped_sges = 0;

	if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
		goto out_map;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
			goto out_map;

	return true;

out_map:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}
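
/* Resulting Send SGE layout (as set up above): rl_send_sge[0] holds
 * the persistently mapped RPC-over-RDMA header, rl_send_sge[1] the
 * head iovec, and any page-list pages plus the tail iovec follow
 * from rl_send_sge[2] onward. Only the entries from index 2 on are
 * DMA-mapped per Send, which is why rpcrdma_unmap_sges() below
 * starts unmapping there.
 */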
void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge;
	int count;

	sge = &req->rl_send_sge[2];
	for (count = req->rl_mapped_sges; count--; sge++)
		ib_dma_unmap_page(device, sge->addr, sge->length,
				  DMA_TO_DEVICE);
	req->rl_mapped_sges = 0;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Returns zero on success, otherwise a negative errno.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;
	bool ddp_allowed;
	ssize_t hdrlen;
	size_t rpclen;
	__be32 *iptr;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		rtype = rpcrdma_noch;
		rpclen = rqst->rq_snd_buf.len;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
		rpclen = rqst->rq_snd_buf.head[0].iov_len +
			 rqst->rq_snd_buf.tail[0].iov_len;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = rdma_nomsg;
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	req->rl_xid = rqst->rq_xid;
	rpcrdma_insert_req(&r_xprt->rx_buf, req);

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
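	/* Worked header layout (hypothetical): with rtype = rpcrdma_readch
	 * and wtype = rpcrdma_noch, the words encoded below are
	 *
	 *    xid, vers, credits, rdma_msg,
	 *    1, P, H, L, O, 0,	Read list: one chunk, then terminator
	 *    0,		no Write list
	 *    0			no Reply chunk
	 *
	 * and the RPC call itself follows inline in the Send buffer.
	 */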
	iptr = headerp->rm_body.rm_chunks;
	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
	if (IS_ERR(iptr))
		goto out_err;
	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_err;
	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_err;
	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		hdrlen, rpclen);

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
				       &rqst->rq_snd_buf, rtype)) {
		iptr = ERR_PTR(-EIO);
		goto out_err;
	}
	return 0;

out_err:
	if (PTR_ERR(iptr) != -ENOBUFS) {
		pr_err("rpcrdma: rpcrdma_marshal_req failed, status %ld\n",
		       PTR_ERR(iptr));
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return PTR_ERR(iptr);
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See the encoding keys at
 * rpcrdma_encode_write_list() and rpcrdma_encode_reply_chunk().
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%016llx:0x%08x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
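
/* Example of the fixup (hypothetical sizes): for a 9000-byte inline
 * reply against an rq_rcv_buf with head.iov_len = 128 and
 * page_len = 8192, the head iovec is redirected to the first 128
 * bytes of the receive buffer, the next 8192 bytes are memcopied
 * into the page list, and the tail iovec is redirected to the 680
 * bytes that remain. Only the 8192 copied bytes count toward
 * fixup_copy_count.
 */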

/* Caller must guarantee @rep remains stable during this call.
 */
static void
rpcrdma_mark_remote_invalidation(struct list_head *mws,
				 struct rpcrdma_rep *rep)
{
	struct rpcrdma_mw *mw;

	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
		return;

	list_for_each_entry(mw, mws, mw_list)
		if (mw->mw_handle == rep->rr_inv_rkey) {
			mw->mw_flags = RPCRDMA_MW_F_RI;
			break; /* only one invalidated MR per RPC */
		}
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *p = (__be32 *)headerp;

	if (headerp->rm_type != rdma_msg)
		return false;
	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
		return false;

	/* sanity: with empty chunk lists the RPC/RDMA header is
	 * seven XDR words, so the embedded RPC header starts at
	 * p[7], whose first field must match the XID
	 */
	if (p[7] != headerp->rm_xid)
		return false;
	/* call direction */
	if (p[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	__be32 *iptr;
	int rdmalen, status, rmerr;
	unsigned long cwnd;
	struct list_head mws;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&buf->rb_lock);
	req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf,
					headerp->rm_xid);
	if (!req)
		goto out_nomatch;
	if (req->rl_reply)
		goto out_duplicate;

	list_replace_init(&req->rl_registered, &mws);
	rpcrdma_mark_remote_invalidation(&mws, rep);

	/* Avoid races with signals and duplicate replies
	 * by marking this req as matched.
	 */
	req->rl_reply = rep;
	spin_unlock(&buf->rb_lock);

	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* Invalidate and unmap the data payloads before waking the
	 * waiting application. This guarantees the memory regions
	 * are properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow control:
	 * waking the next RPC waits until this RPC has relinquished
	 * all its Send Queue entries.
	 */
	if (!list_empty(&mws))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &mws);

	/* Perform XID lookup, reconstruction of the RPC reply, and
	 * RPC completion while holding the transport lock to ensure
	 * the rep, rqst, and rq_task pointers remain stable.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_norqst;
	xprt->reestablish_timeout = 0;
	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}

		r_xprt->rx_stats.fixup_copy_count +=
			rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
					     rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

	case rdma_error:
		goto out_rdmaerr;

badheader:
	default:
		dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpu(headerp->rm_type));
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

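	/* The credit update below folds the server's most recent credit
	 * grant (cached in rb_credits as replies arrive) into the generic
	 * RPC congestion window, so additional queued requests may
	 * proceed.
	 */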
out:
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

	/* If the incoming reply terminated a pending RPC, the next
	 * RPC call will post a replacement receive buffer as it is
	 * being marshaled.
	 */
out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	status = -EIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

out_rdmaerr:
	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
	switch (rmerr) {
	case ERR_VERS:
		pr_err("%s: server reports header version error (%u-%u)\n",
		       __func__,
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
		break;
	case ERR_CHUNK:
		pr_err("%s: server reports header decoding error\n",
		       __func__);
		break;
	default:
		pr_err("%s: server reports unknown error %d\n",
		       __func__, rmerr);
	}
	status = -EREMOTEIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

	/* The req was still available, but by the time the transport_lock
	 * was acquired, the rqst and task had been released. Thus the RPC
	 * has already been terminated.
	 */
out_norqst:
	spin_unlock_bh(&xprt->transport_lock);
	rpcrdma_buffer_put(req);
	dprintk("RPC: %s: race, no rqst left for req %p\n",
		__func__, req);
	return;

out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);
	goto repost;

out_nomatch:
	spin_unlock(&buf->rb_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	spin_unlock(&buf->rb_lock);
	dprintk("RPC: %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* If no pending RPC transaction was matched, post a replacement
	 * receive buffer before returning.
	 */
repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}