/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"

static void rds_ib_send_rdma_complete(struct rds_message *rm,
				      int wc_status)
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	rds_rdma_send_complete(rm, notify_status);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rds_rdma_op *op)
{
	if (op->r_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->r_sg, op->r_nents,
				op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->r_mapped = 0;
	}
}

static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->data.m_sg, rm->data.m_nents,
			DMA_TO_DEVICE);

	if (rm->rdma.m_rdma_op.r_active) {
		rds_ib_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 * 1. Notify when we received the ACK on the RDS message
		 *    that was queued with the RDMA. This provides reliable
		 *    notification of RDMA status at the expense of a one-way
		 *    packet delay.
		 * 2. Notify when the IB stack gives us the completion event for
		 *    the RDMA operation.
		 * 3. Notify when the IB stack gives us the completion event for
		 *    the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of synching.
		 */
		rds_ib_send_rdma_complete(rm, wc_status);

		if (rm->rdma.m_rdma_op.r_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_wr.opcode == 0xdead)
			continue;
		if (send->s_rm)
			rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
		if (send->s_op)
			rds_ib_send_unmap_rdma(ic, send->s_op);
	}
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring, it doesn't alter which entry is
 * next to be freed, and that is all this code is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * will never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					if (rm->rdma.m_rdma_op.r_active)
						rds_ib_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);
					rds_ib_send_rdma_complete(rm, wc.status);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}
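
/*
 * A minimal sketch of the "completed" count the handler above relies on,
 * assuming rds_ib_ring_completed() returns the forward distance, inclusive
 * and wrapping, from the oldest outstanding ring index to the index carried
 * in wc.wr_id. The helper name is illustrative only; the real arithmetic
 * lives in ib_ring.c.
 */
static inline u32 rds_ib_ring_completed_sketch(u32 w_nr, u32 wr_id, u32 oldest)
{
	if (oldest <= wr_id)
		return wr_id - oldest + 1;	/* no wrap: simple span */
	return w_nr - oldest + wr_id + 1;	/* wrapped past the end of the ring */
}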

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
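
/*
 * A minimal sketch of the single-atomic credit packing described above,
 * assuming the IB_SET/GET_*_CREDITS helpers in ib.h keep send credits in the
 * low half of ic->i_credits and posted credits in the high half. The helper
 * name is illustrative; it shows how one cmpxchg retires both counters in a
 * single step, which is what the try_again loop above depends on.
 */
static inline int rds_ib_credits_bill_sketch(atomic_t *credits,
					     unsigned int got,
					     unsigned int advertise)
{
	int oldval = atomic_read(credits);
	int newval = oldval - IB_SET_SEND_CREDITS(got)
			    - IB_SET_POST_CREDITS(advertise);

	/* Non-zero on success; a caller would retry from the top on failure. */
	return atomic_cmpxchg(credits, oldval, newval) == oldval;
}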

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline void
rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
		struct rds_ib_send_work *send, unsigned int pos,
		unsigned long buffer, unsigned int length,
		int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;
}

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_rm) {
		/*
		printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
				rm->m_inc.i_hdr.h_flags,
				be32_to_cpu(rm->m_inc.i_hdr.h_len));
		*/
		if (rm->data.m_nents) {
			rm->data.m_count = ib_dma_map_sg(dev,
							 rm->data.m_sg,
							 rm->data.m_nents,
							 DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
			if (rm->data.m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.m_rdma_op.r_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.m_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages. So instead we always
	 * use a second sge and our long-lived ring of mapped headers. We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_ib_xmit_populate_wr(ic, send, pos,
					ib_sg_dma_address(dev, scat) + off, len,
					send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}

add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (0) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(hdr->h_dport),
				hdr->h_flags,
				be32_to_cpu(hdr->h_len));
		}
		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.m_sg[rm->data.m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}
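
/*
 * A minimal sketch of the work-request budgeting rds_ib_xmit() starts with,
 * assuming ceil() from rds.h is round-up division: a message of h_len payload
 * bytes needs one SEND fragment per RDS_FRAG_SIZE chunk, and a zero-length
 * message still consumes one work request for its header. The helper name is
 * illustrative only.
 */
static inline u32 rds_ib_xmit_wrs_needed_sketch(u32 payload_len)
{
	if (payload_len == 0)
		return 1;
	return ceil(payload_len, RDS_FRAG_SIZE);
}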

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents, (op->r_write) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;

	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		prev->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}

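/*
 * A minimal sketch of the budgeting used by rds_ib_xmit_rdma() above,
 * assuming ceil() is round-up division: an RDMA op whose mapped scatterlist
 * has r_count entries is split into work requests of at most max_sge SGEs
 * each, and the whole op must fit in the ring or the allocation is undone.
 * The helper name is illustrative only.
 */
static inline u32 rds_ib_rdma_wrs_needed_sketch(u32 r_count, u32 max_sge)
{
	return ceil(r_count, max_sge);
}
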
void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}