net/rds/ib_send.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "ib.h"

/*
 * Convert an IB-specific completion status to an RDS status and call
 * the core completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
                                 int wc_status,
                                 void (*complete)(struct rds_message *rm, int status))
{
        int notify_status;

        switch (wc_status) {
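        /* Flushed WRs are expected while the QP drains on shutdown;
         * they carry no meaningful status, so don't notify. */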
        case IB_WC_WR_FLUSH_ERR:
                return;

        case IB_WC_SUCCESS:
                notify_status = RDS_RDMA_SUCCESS;
                break;

        case IB_WC_REM_ACCESS_ERR:
                notify_status = RDS_RDMA_REMOTE_ERROR;
                break;

        default:
                notify_status = RDS_RDMA_OTHER_ERROR;
                break;
        }
        complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
                                   struct rm_data_op *op,
                                   int wc_status)
{
        if (op->op_nents)
                ib_dma_unmap_sg(ic->i_cm_id->device,
                                op->op_sg, op->op_nents,
                                DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
                                   struct rm_rdma_op *op,
                                   int wc_status)
{
        if (op->op_mapped) {
                ib_dma_unmap_sg(ic->i_cm_id->device,
                                op->op_sg, op->op_nents,
                                op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                op->op_mapped = 0;
        }

        /* If the user asked for a completion notification on this
         * message, we can implement three different semantics:
         * 1. Notify when we receive the ACK on the RDS message
         *    that was queued with the RDMA. This provides reliable
         *    notification of RDMA status at the expense of a one-way
         *    packet delay.
         * 2. Notify when the IB stack gives us the completion event for
         *    the RDMA operation.
         * 3. Notify when the IB stack gives us the completion event for
         *    the accompanying RDS message.
         * Here, we implement approach #3. To implement approach #2,
         * we would need to take an event for the rdma WR. To implement #1,
         * don't call rds_rdma_send_complete at all, and fall back to the notify
         * handling in the ACK processing code.
         *
         * Note: There's no need to explicitly sync any RDMA buffers using
         * ib_dma_sync_sg_for_cpu - the completion for the RDMA
         * operation itself unmapped the RDMA buffers, which takes care
         * of syncing.
         */
        rds_ib_send_complete(container_of(op, struct rds_message, rdma),
                             wc_status, rds_rdma_send_complete);

        if (op->op_write)
                rds_stats_add(s_send_rdma_bytes, op->op_bytes);
        else
                rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
                                     struct rm_atomic_op *op,
                                     int wc_status)
{
        /* unmap atomic recvbuf */
        if (op->op_mapped) {
                ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
                                DMA_FROM_DEVICE);
                op->op_mapped = 0;
        }

        rds_ib_send_complete(container_of(op, struct rds_message, atomic),
                             wc_status, rds_atomic_send_complete);

        if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
                rds_ib_stats_inc(s_ib_atomic_cswp);
        else
                rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm because the caller (the event handler) needs it, and
 * currently the only way to recover it is by switching on wr.opcode.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
                                                struct rds_ib_send_work *send,
                                                int wc_status)
{
        struct rds_message *rm = NULL;

        /* In the error case, wc.opcode sometimes contains garbage */
        switch (send->s_wr.opcode) {
        case IB_WR_SEND:
                if (send->s_op) {
                        rm = container_of(send->s_op, struct rds_message, data);
                        rds_ib_send_unmap_data(ic, send->s_op, wc_status);
                }
                break;
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_READ:
                if (send->s_op) {
                        rm = container_of(send->s_op, struct rds_message, rdma);
                        rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
                }
                break;
        case IB_WR_ATOMIC_FETCH_AND_ADD:
        case IB_WR_ATOMIC_CMP_AND_SWP:
                if (send->s_op) {
                        rm = container_of(send->s_op, struct rds_message, atomic);
                        rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
                }
                break;
        default:
                printk_ratelimited(KERN_NOTICE
                                   "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
                                   __func__, send->s_wr.opcode);
                break;
        }

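        /* Poison the opcode so rds_ib_send_clear_ring() can skip entries
         * that have already been unmapped. */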
        send->s_wr.opcode = 0xdead;

        return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_send_work *send;
        u32 i;

        for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
                struct ib_sge *sge;

                send->s_op = NULL;

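                /* The high RDS_IB_SEND_OP bit tags this wr_id as a send WR;
                 * the completion handler masks it off to recover the ring
                 * index. */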
                send->s_wr.wr_id = i | RDS_IB_SEND_OP;
                send->s_wr.sg_list = send->s_sge;
                send->s_wr.ex.imm_data = 0;

                sge = &send->s_sge[0];
                sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_pd->local_dma_lkey;

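                /* s_sge[1] carries the data fragment; its address and length
                 * are filled in at transmit time, only the lkey is fixed. */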
                send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
        }
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_send_work *send;
        u32 i;

        for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
                if (send->s_op && send->s_wr.opcode != 0xdead)
                        rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
        }
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
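/*
 * i_signaled_sends counts signaled WRs still in flight; connection
 * teardown sleeps on rds_ib_ring_empty_wait until it drains to zero.
 */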
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
        if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
            waitqueue_active(&rds_ib_ring_empty_wait))
                wake_up(&rds_ib_ring_empty_wait);
        BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring, it never alters which entry is
 * next to be freed, and that is all this code is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
        struct rds_message *rm = NULL;
        struct rds_connection *conn = ic->conn;
        struct rds_ib_send_work *send;
        u32 completed;
        u32 oldest;
        u32 i = 0;
        int nr_sig = 0;

        rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
                 (unsigned long long)wc->wr_id, wc->status,
                 ib_wc_status_msg(wc->status), wc->byte_len,
                 be32_to_cpu(wc->ex.imm_data));
        rds_ib_stats_inc(s_ib_tx_cq_event);

        if (wc->wr_id == RDS_IB_ACK_WR_ID) {
                if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
                        rds_ib_stats_inc(s_ib_tx_stalled);
                rds_ib_ack_send_complete(ic);
                return;
        }

        oldest = rds_ib_ring_oldest(&ic->i_send_ring);

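        /* Completions are signaled only on selected WRs, so a single
         * completion retires every ring entry from the oldest outstanding
         * one up to the entry named by wr_id. */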
        completed = rds_ib_ring_completed(&ic->i_send_ring,
                                          (wc->wr_id & ~RDS_IB_SEND_OP),
                                          oldest);

        for (i = 0; i < completed; i++) {
                send = &ic->i_sends[oldest];
                if (send->s_wr.send_flags & IB_SEND_SIGNALED)
                        nr_sig++;

                rm = rds_ib_send_unmap_op(ic, send, wc->status);

                if (time_after(jiffies, send->s_queued + HZ / 2))
                        rds_ib_stats_inc(s_ib_tx_stalled);

                if (send->s_op) {
                        if (send->s_op == rm->m_final_op) {
                                /* If anyone waited for this message to get
                                 * flushed out, wake them up now
                                 */
                                rds_message_unmapped(rm);
                        }
                        rds_message_put(rm);
                        send->s_op = NULL;
                }

                oldest = (oldest + 1) % ic->i_send_ring.w_nr;
        }

        rds_ib_ring_free(&ic->i_send_ring, completed);
        rds_ib_sub_signaled(ic, nr_sig);
        nr_sig = 0;

        if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
            test_bit(0, &conn->c_map_queued))
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

        /* We expect errors as the qp is drained during shutdown */
        if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
                rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
                                  &conn->c_faddr, wc->status,
                                  ib_wc_status_msg(wc->status));
        }
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, updating that atomic with
 * atomic_add (in the receive path, when receiving fresh credits),
 * and with atomic_cmpxchg when updating the two counters together.
 */
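/*
 * Layout of that single atomic (see the IB_GET_*_CREDITS and
 * IB_SET_*_CREDITS macros in ib.h):
 *
 *   31            16 15             0
 *  +----------------+----------------+
 *  | posted credits |  send credits  |
 *  +----------------+----------------+
 */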
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
                             u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
        unsigned int avail, posted, got = 0, advertise;
        long oldval, newval;

        *adv_credits = 0;
        if (!ic->i_flowctl)
                return wanted;

try_again:
        advertise = 0;
        oldval = newval = atomic_read(&ic->i_credits);
        posted = IB_GET_POST_CREDITS(oldval);
        avail = IB_GET_SEND_CREDITS(oldval);

        rdsdebug("wanted=%u credits=%u posted=%u\n",
                 wanted, avail, posted);

        /* The last credit must be used to send a credit update. */
        if (avail && !posted)
                avail--;

        if (avail < wanted) {
                struct rds_connection *conn = ic->i_cm_id->context;

                /* Oops, there aren't that many credits left! */
                set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
                got = avail;
        } else {
                /* Sometimes you get what you want, lalala. */
                got = wanted;
        }
        newval -= IB_SET_SEND_CREDITS(got);

        /*
         * If need_posted is non-zero, then the caller wants the posted
         * credits advertised regardless of whether any send credits are
         * available.
         */
        if (posted && (got || need_posted)) {
                advertise = min_t(unsigned int, posted, max_posted);
                newval -= IB_SET_POST_CREDITS(advertise);
        }

        /* Finally bill everything */
        if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
                goto try_again;

        *adv_credits = advertise;
        return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (credits == 0)
                return;

        rdsdebug("credits=%u current=%u%s\n",
                 credits,
                 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
                 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

        atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
        if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

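        /* Sanity check: a single update this large should be impossible,
         * since h_credit in the RDS header is one byte. */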
        WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

        rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (posted == 0)
                return;

        atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

        /* Decide whether to send an update to the peer now.
         * If we would send a credit update for every single buffer we
         * post, we would end up with an ACK storm (ACK arrives,
         * consumes buffer, we refill the ring, send ACK to remote
         * advertising the newly posted buffer... ad inf)
         *
         * Performance pretty much depends on how often we send
         * credit updates - too frequent updates mean lots of ACKs.
         * Too infrequent updates, and the peer will run out of
         * credits and have to throttle.
         * For the time being, 16 seems to be a good compromise.
         */
        if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
                                             struct rds_ib_send_work *send,
                                             bool notify)
{
        /*
         * We want to delay signaling completions just enough to get
         * the batching benefits but not so much that we create dead time
         * on the wire.
         */
        if (ic->i_unsignaled_wrs-- == 0 || notify) {
                ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
                send->s_wr.send_flags |= IB_SEND_SIGNALED;
                return 1;
        }
        return 0;
}

469
470 /*
471 * This can be called multiple times for a given message. The first time
472 * we see a message we map its scatterlist into the IB device so that
473 * we can provide that mapped address to the IB scatter gather entries
474 * in the IB work requests. We translate the scatterlist into a series
475 * of work requests that fragment the message. These work requests complete
476 * in order so we pass ownership of the message to the completion handler
477 * once we send the final fragment.
478 *
479 * The RDS core uses the c_send_lock to only enter this function once
480 * per connection. This makes sure that the tx ring alloc/unalloc pairs
481 * don't get out of sync and confuse the ring.
482 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
                unsigned int hdr_off, unsigned int sg, unsigned int off)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_device *dev = ic->i_cm_id->device;
        struct rds_ib_send_work *send = NULL;
        struct rds_ib_send_work *first;
        struct rds_ib_send_work *prev;
        struct ib_send_wr *failed_wr;
        struct scatterlist *scat;
        u32 pos;
        u32 i;
        u32 work_alloc;
        u32 credit_alloc = 0;
        u32 posted;
        u32 adv_credits = 0;
        int send_flags = 0;
        int bytes_sent = 0;
        int ret;
        int flow_controlled = 0;
        int nr_sig = 0;

        BUG_ON(off % RDS_FRAG_SIZE);
        BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

        /* Do not send cong updates to IB loopback */
        if (conn->c_loopback
            && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
                rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
                scat = &rm->data.op_sg[sg];
                ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
                return sizeof(struct rds_header) + ret;
        }

        /* FIXME we may overallocate here */
        if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
                i = 1;
        else
                i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

        work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
        if (work_alloc == 0) {
                set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
                rds_ib_stats_inc(s_ib_tx_ring_full);
                ret = -ENOMEM;
                goto out;
        }

        if (ic->i_flowctl) {
                credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
                adv_credits += posted;
                if (credit_alloc < work_alloc) {
                        rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
                        work_alloc = credit_alloc;
                        flow_controlled = 1;
                }
                if (work_alloc == 0) {
                        set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
                        rds_ib_stats_inc(s_ib_tx_throttle);
                        ret = -ENOMEM;
                        goto out;
                }
        }

        /* map the message the first time we see it */
        if (!ic->i_data_op) {
                if (rm->data.op_nents) {
                        rm->data.op_count = ib_dma_map_sg(dev,
                                                          rm->data.op_sg,
                                                          rm->data.op_nents,
                                                          DMA_TO_DEVICE);
                        rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
                        if (rm->data.op_count == 0) {
                                rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
                                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                                ret = -ENOMEM; /* XXX ? */
                                goto out;
                        }
                } else {
                        rm->data.op_count = 0;
                }

                rds_message_addref(rm);
                rm->data.op_dmasg = 0;
                rm->data.op_dmaoff = 0;
                ic->i_data_op = &rm->data;

                /* Finalize the header */
                if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
                        rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
                if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
                        rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

                /* If it has an RDMA op, tell the peer we did it. This is
                 * used by the peer to release use-once RDMA MRs. */
                if (rm->rdma.op_active) {
                        struct rds_ext_header_rdma ext_hdr;

                        ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
                        rds_message_add_extension(&rm->m_inc.i_hdr,
                                                  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
                }
                if (rm->m_rdma_cookie) {
                        rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
                                                            rds_rdma_cookie_key(rm->m_rdma_cookie),
                                                            rds_rdma_cookie_offset(rm->m_rdma_cookie));
                }

                /* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
                 * we should not do this unless we have a chance of at least
                 * sticking the header into the send ring, which is why
                 * rds_ib_ring_alloc was called first. */
                rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
                rds_message_make_checksum(&rm->m_inc.i_hdr);

                /*
                 * Update adv_credits since we reset the ACK_REQUIRED bit.
                 */
                if (ic->i_flowctl) {
                        rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
                        adv_credits += posted;
                        BUG_ON(adv_credits > 255);
                }
        }

        /* Sometimes you want to put a fence between an RDMA
         * READ and the following SEND.
         * We could either do this all the time
         * or when requested by the user. Right now, we let
         * the application choose.
         */
        if (rm->rdma.op_active && rm->rdma.op_fence)
                send_flags = IB_SEND_FENCE;

        /* Each frag gets a header. Msgs may be 0 bytes */
        send = &ic->i_sends[pos];
        first = send;
        prev = NULL;
        scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
        i = 0;
        do {
                unsigned int len = 0;

                /* Set up the header */
                send->s_wr.send_flags = send_flags;
                send->s_wr.opcode = IB_WR_SEND;
                send->s_wr.num_sge = 1;
                send->s_wr.next = NULL;
                send->s_queued = jiffies;
                send->s_op = NULL;

                send->s_sge[0].addr = ic->i_send_hdrs_dma
                                      + (pos * sizeof(struct rds_header));
                send->s_sge[0].length = sizeof(struct rds_header);

                memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

                /* Set up the data, if present */
                if (i < work_alloc
                    && scat != &rm->data.op_sg[rm->data.op_count]) {
                        len = min(RDS_FRAG_SIZE,
                                  ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
                        send->s_wr.num_sge = 2;

                        send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
                        send->s_sge[1].addr += rm->data.op_dmaoff;
                        send->s_sge[1].length = len;

                        bytes_sent += len;
                        rm->data.op_dmaoff += len;
                        if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
                                scat++;
                                rm->data.op_dmasg++;
                                rm->data.op_dmaoff = 0;
                        }
                }

                rds_ib_set_wr_signal_state(ic, send, 0);

                /*
                 * Always signal the last one if we're stopping due to flow control.
                 */
                if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1))
                        send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

                if (send->s_wr.send_flags & IB_SEND_SIGNALED)
                        nr_sig++;

                rdsdebug("send %p wr %p num_sge %u next %p\n", send,
                         &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

                if (ic->i_flowctl && adv_credits) {
                        struct rds_header *hdr = &ic->i_send_hdrs[pos];

                        /* add credit and redo the header checksum */
                        hdr->h_credit = adv_credits;
                        rds_message_make_checksum(hdr);
                        adv_credits = 0;
                        rds_ib_stats_inc(s_ib_tx_credit_updates);
                }

                if (prev)
                        prev->s_wr.next = &send->s_wr;
                prev = send;

                pos = (pos + 1) % ic->i_send_ring.w_nr;
                send = &ic->i_sends[pos];
                i++;

        } while (i < work_alloc
                 && scat != &rm->data.op_sg[rm->data.op_count]);

        /* Account the RDS header in the number of bytes we sent, but just once.
         * The caller has no concept of fragmentation. */
        if (hdr_off == 0)
                bytes_sent += sizeof(struct rds_header);

        /* if we finished the message then send completion owns it */
        if (scat == &rm->data.op_sg[rm->data.op_count]) {
                prev->s_op = ic->i_data_op;
                prev->s_wr.send_flags |= IB_SEND_SOLICITED;
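                /* The final fragment must be signaled so the completion
                 * handler always runs, unmaps the message and drops our
                 * reference. */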
                if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
                        ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
                        prev->s_wr.send_flags |= IB_SEND_SIGNALED;
                        nr_sig++;
                }
                ic->i_data_op = NULL;
        }

        /* Put back wrs & credits we didn't use */
        if (i < work_alloc) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
                work_alloc = i;
        }
        if (ic->i_flowctl && i < credit_alloc)
                rds_ib_send_add_credits(conn, credit_alloc - i);

        if (nr_sig)
                atomic_add(nr_sig, &ic->i_signaled_sends);

        /* XXX need to worry about failed_wr and partial sends. */
        failed_wr = &first->s_wr;
        ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
        rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
                 first, &first->s_wr, ret, failed_wr);
        BUG_ON(failed_wr != &first->s_wr);
        if (ret) {
                printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
                       "returned %d\n", &conn->c_faddr, ret);
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_sub_signaled(ic, nr_sig);
                if (prev->s_op) {
                        ic->i_data_op = prev->s_op;
                        prev->s_op = NULL;
                }

                rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
                goto out;
        }

        ret = bytes_sent;
out:
        BUG_ON(adv_credits);
        return ret;
}

/*
 * Issue an atomic operation.
 * A simplified version of the RDMA case: we always map a single SG
 * entry, of just 8 bytes, for the atomic operation's return value.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_send_work *send = NULL;
        struct ib_send_wr *failed_wr;
        struct rds_ib_device *rds_ibdev;
        u32 pos;
        u32 work_alloc;
        int ret;
        int nr_sig = 0;

        rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

        work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
        if (work_alloc != 1) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_stats_inc(s_ib_tx_ring_full);
                ret = -ENOMEM;
                goto out;
        }

        /* address of send request in ring */
        send = &ic->i_sends[pos];
        send->s_queued = jiffies;

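        /* The masked atomic opcodes let selected bits be excluded: the CSWP
         * masks limit which bits are compared and swapped, and the FADD mask
         * stops carries from propagating across the masked bit positions. */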
        if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
                send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
                send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
                send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
                send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
                send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
        } else { /* FADD */
                send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
                send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
                send->s_wr.wr.atomic.swap = 0;
                send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
                send->s_wr.wr.atomic.swap_mask = 0;
        }
        nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
        send->s_wr.num_sge = 1;
        send->s_wr.next = NULL;
        send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
        send->s_wr.wr.atomic.rkey = op->op_rkey;
        send->s_op = op;
        rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

        /* map 8 byte retval buffer to the device */
        ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
        rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
        if (ret != 1) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
                ret = -ENOMEM; /* XXX ? */
                goto out;
        }

        /* Convert our struct scatterlist to struct ib_sge */
        send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
        send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
        send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

        rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
                 send->s_sge[0].addr, send->s_sge[0].length);

        if (nr_sig)
                atomic_add(nr_sig, &ic->i_signaled_sends);

        failed_wr = &send->s_wr;
        ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
        rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
                 send, &send->s_wr, ret, failed_wr);
        BUG_ON(failed_wr != &send->s_wr);
        if (ret) {
                printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
                       "returned %d\n", &conn->c_faddr, ret);
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_sub_signaled(ic, nr_sig);
                goto out;
        }

        if (unlikely(failed_wr != &send->s_wr)) {
                printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
                BUG_ON(failed_wr != &send->s_wr);
        }

out:
        return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_send_work *send = NULL;
        struct rds_ib_send_work *first;
        struct rds_ib_send_work *prev;
        struct ib_send_wr *failed_wr;
        struct scatterlist *scat;
        unsigned long len;
        u64 remote_addr = op->op_remote_addr;
        u32 max_sge = ic->rds_ibdev->max_sge;
        u32 pos;
        u32 work_alloc;
        u32 i;
        u32 j;
        int sent;
        int ret;
        int num_sge;
        int nr_sig = 0;

        /* map the op the first time we see it */
        if (!op->op_mapped) {
                op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
                                             op->op_sg, op->op_nents, (op->op_write) ?
                                             DMA_TO_DEVICE : DMA_FROM_DEVICE);
                rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
                if (op->op_count == 0) {
                        rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
                        ret = -ENOMEM; /* XXX ? */
                        goto out;
                }

                op->op_mapped = 1;
        }

        /*
         * Rather than supporting partial RDMA reads/writes, we insist that
         * there be enough work requests to send the entire message.
         */
        i = ceil(op->op_count, max_sge);

        work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
        if (work_alloc != i) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_stats_inc(s_ib_tx_ring_full);
                ret = -ENOMEM;
                goto out;
        }

        send = &ic->i_sends[pos];
        first = send;
        prev = NULL;
        scat = &op->op_sg[0];
        sent = 0;
        num_sge = op->op_count;

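        /* Split the mapped scatterlist into WRs of at most max_sge entries
         * each; remote_addr advances as each SGE's bytes are consumed. */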
        for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
                send->s_wr.send_flags = 0;
                send->s_queued = jiffies;
                send->s_op = NULL;

                nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

                send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
                send->s_wr.wr.rdma.remote_addr = remote_addr;
                send->s_wr.wr.rdma.rkey = op->op_rkey;

                if (num_sge > max_sge) {
                        send->s_wr.num_sge = max_sge;
                        num_sge -= max_sge;
                } else {
                        send->s_wr.num_sge = num_sge;
                }

                send->s_wr.next = NULL;

                if (prev)
                        prev->s_wr.next = &send->s_wr;

                for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
                        len = ib_sg_dma_len(ic->i_cm_id->device, scat);
                        send->s_sge[j].addr =
                                ib_sg_dma_address(ic->i_cm_id->device, scat);
                        send->s_sge[j].length = len;
                        send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;

                        sent += len;
                        rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

                        remote_addr += len;
                        scat++;
                }

                rdsdebug("send %p wr %p num_sge %u next %p\n", send,
                         &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

                prev = send;
                if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
                        send = ic->i_sends;
        }

        /* give a reference to the last op */
        if (scat == &op->op_sg[op->op_count]) {
                prev->s_op = op;
                rds_message_addref(container_of(op, struct rds_message, rdma));
        }

        if (i < work_alloc) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
                work_alloc = i;
        }

        if (nr_sig)
                atomic_add(nr_sig, &ic->i_signaled_sends);

        failed_wr = &first->s_wr;
        ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
        rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
                 first, &first->s_wr, ret, failed_wr);
        BUG_ON(failed_wr != &first->s_wr);
        if (ret) {
                printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
                       "returned %d\n", &conn->c_faddr, ret);
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_sub_signaled(ic, nr_sig);
                goto out;
        }

        if (unlikely(failed_wr != &first->s_wr)) {
                printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
                BUG_ON(failed_wr != &first->s_wr);
        }

out:
        return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* We may have a pending ACK or window update we were unable
         * to send previously (due to flow control). Try again. */
        rds_ib_attempt_ack(ic);
}