/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize;

#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif
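
/*
 * When RPC_DEBUG is enabled, the tunables above are published under
 * /proc/sys/sunrpc/ and may be adjusted at runtime, for example:
 *
 *	# echo 64 > /proc/sys/sunrpc/rdma_slot_table_entries
 *	# cat /proc/sys/sunrpc/rdma_memreg_strategy
 */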

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */

static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	/* netid */
	xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
}

static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

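	/* PROTO and NETID point at static strings; free only the
	 * kstrdup'd entries */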
	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(work, struct rpcrdma_xprt, rdma_connect.work);
	struct rpc_xprt *xprt = &r_xprt->xprt;
	int rc = 0;

	xprt_clear_connected(xprt);

	dprintk("RPC: %s: %sconnect\n", __func__,
			r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		xprt_wake_pending_tasks(xprt, rc);

	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
}

/*
 * xprt_rdma_destroy
 *
 * Destroy the xprt.
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rdma_connect);

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	xprt_free(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}

static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
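 *
 * Return: a new rpc_xprt on success, or an ERR_PTR-encoded errno on
 * failure.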
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			xprt_rdma_slot_table_entries,
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC: %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));

	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;

	/*
	 * Create new transport instance, which includes initialized
	 *  o ia
	 *  o endpoint
	 *  o buffers
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
				&new_xprt->rx_data);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
	new_ep->rep_func = rpcrdma_conn_func;
	new_ep->rep_xprt = xprt;

	xprt_rdma_format_addresses(xprt);
	xprt->max_payload = rpcrdma_max_payload(new_xprt);
	dprintk("RPC: %s: transport data payload maximum: %zu bytes\n",
		__func__, xprt->max_payload);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}

/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}

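/*
 * Update the server port in both the generic transport address and
 * the private copy used when establishing the RDMA connection.
 */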
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}

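/*
 * Reconnect with exponential backoff: the reestablish timeout doubles
 * after each attempt and is clamped to the range
 * [RPCRDMA_INIT_REEST_TO, RPCRDMA_MAX_REEST_TO]. A first-time connect
 * is scheduled immediately, and synchronous tasks wait for it to
 * complete.
 */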
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rdma_connect,
				      xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
		else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	} else {
		schedule_delayed_work(&r_xprt->rdma_connect, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rdma_connect);
	}
}

/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence. For this reason, the recv buffers are attached to send
 * buffers for portions of the RPC. Note that the RPC layer allocates
 * both send and receive buffers in the same call. We may register
 * the receive buffer portion when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpcrdma_req *req, *nreq;

	req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
	if (req == NULL)
		return NULL;

	if (size > req->rl_size) {
		dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
			"prog %d vers %d proc %d\n",
			__func__, size, req->rl_size,
			task->tk_client->cl_prog, task->tk_client->cl_vers,
			task->tk_msg.rpc_proc->p_proc);
		/*
		 * Outgoing length shortage. Our inline write max must have
		 * been configured to perform direct i/o.
		 *
		 * This is therefore a large metadata operation, and the
		 * allocate call was made on the maximum possible message,
		 * e.g. containing long filename(s) or symlink data. In
		 * fact, while these metadata operations *might* carry
		 * large outgoing payloads, they rarely *do*. However, we
		 * have to commit to the request here, so reallocate and
		 * register it now. The data path will never require this
		 * reallocation.
		 *
		 * If the allocation or registration fails, the RPC framework
		 * will (doggedly) retry.
		 */
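		/* Allocations for swap-out I/O must not sleep waiting
		 * on memory reclaim */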
		if (task->tk_flags & RPC_TASK_SWAPPER)
			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
		else
			nreq = kmalloc(sizeof *req + size, GFP_NOFS);
		if (nreq == NULL)
			goto outfail;

		if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
				nreq->rl_base, size + sizeof(struct rpcrdma_req)
				- offsetof(struct rpcrdma_req, rl_base),
				&nreq->rl_handle, &nreq->rl_iov)) {
			kfree(nreq);
			goto outfail;
		}
		rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
		nreq->rl_size = size;
		nreq->rl_niovs = 0;
		nreq->rl_nchunks = 0;
		nreq->rl_buffer = (struct rpcrdma_buffer *)req;
		nreq->rl_reply = req->rl_reply;
		memcpy(nreq->rl_segments,
			req->rl_segments, sizeof nreq->rl_segments);
		/* flag the swap with an unused field */
		nreq->rl_iov.length = 0;
		req->rl_reply = NULL;
		req = nreq;
	}
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_xdr_buf;

outfail:
	rpcrdma_buffer_put(req);
	rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
	return NULL;
}

/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_rep *rep;
	int i;

	if (buffer == NULL)
		return;

	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
	if (req->rl_iov.length == 0) {	/* see allocate above */
		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
				      struct rpcrdma_xprt, rx_buf);
	} else
		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
	rep = req->rl_reply;

	dprintk("RPC: %s: called on 0x%p%s\n",
		__func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");

	/*
	 * Finish the deregistration. The process is considered
	 * complete when the rr_func vector becomes NULL - this
	 * was put in place during rpcrdma_reply_handler() - the wait
	 * call below will not block if the dereg is "done". If
	 * interrupted, our framework will clean up.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt);
	}

	if (req->rl_iov.length == 0) {	/* see allocate above */
		struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
		oreq->rl_reply = req->rl_reply;
		(void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
						   req->rl_handle,
						   &req->rl_iov);
		kfree(req);
		req = oreq;
	}

	/* Put back request+reply buffers */
	rpcrdma_buffer_put(req);
}

/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *	putting a header in front of data, and creating IOVs for RDMA
 *	from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up async completion, then send
 *	the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */

static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

	if (req->rl_niovs == 0)
		rc = rpcrdma_marshal_req(rqst);
	else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
		rc = rpcrdma_marshal_chunks(rqst, 0);
	if (rc < 0)
		goto failed_marshal;

	if (req->rl_reply == NULL)		/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	if (req->rl_reply) {
		req->rl_reply->rr_func = rpcrdma_reply_handler;
		/* this need only be done once, but... */
		req->rl_reply->rr_xprt = xprt;
	}

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

failed_marshal:
	r_xprt->rx_stats.failed_marshal_count++;
	dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n",
		__func__, rc);
	if (rc == -EIO)
		return -EIO;
drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}

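/*
 * Emit transport statistics; this is the "xprt:\trdma ..." line
 * reported to userland (for example via /proc/self/mountstats).
 */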
static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

	   0,	/* need a local port? */
	   xprt->stat.bind_count,
	   xprt->stat.connect_count,
	   xprt->stat.connect_time,
	   idle_time,
	   xprt->stat.sends,
	   xprt->stat.recvs,
	   xprt->stat.bad_xids,
	   xprt->stat.req_u,
	   xprt->stat.bklog_u,

	   r_xprt->rx_stats.read_chunk_count,
	   r_xprt->rx_stats.write_chunk_count,
	   r_xprt->rx_stats.reply_chunk_count,
	   r_xprt->rx_stats.total_rdma_request,
	   r_xprt->rx_stats.total_rdma_reply,
	   r_xprt->rx_stats.pullup_copy_count,
	   r_xprt->rx_stats.fixup_copy_count,
	   r_xprt->rx_stats.hardway_register_count,
	   r_xprt->rx_stats.failed_marshal_count,
	   r_xprt->rx_stats.bad_reply_count);
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,	/* ditto */
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};
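
/*
 * Once this class is registered, userland selects the transport at
 * mount time, e.g. "mount -o proto=rdma,port=20049 ..." (option
 * parsing is handled by the NFS mount path, not by this module).
 */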

static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);
}

static int __init xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);

	if (rc)
		return rc;

	dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk("Defaults:\n");
	dprintk("\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk("\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}

module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);