net/smc/smc_tx.c
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	HZ

/***************************** sndbuf producer *******************************/
/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait_memory().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   POLLOUT | POLLWRNORM |
						   POLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
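/* Note: unlike sk_stream_write_space(), which gates the wakeup on
 * sk_stream_is_writeable(), the check above uses SMC's own sndbuf_space
 * counter, since sndbuf fill level is tracked by SMC cursors rather than
 * by sk_wmem accounting.
 */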
/* Wakeup sndbuf producers that blocked with smc_tx_wait_memory().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}
/* blocks sndbuf producer until at least one byte of free space available */
static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = timeo ? false : true;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space))
			break; /* at least 1 byte of free space available */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close_or_senddone(conn) ||
			      atomic_read(&conn->sndbuf_space),
			      &wait);
		sk->sk_write_pending--;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
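/* Return values of smc_tx_wait_memory(), summarizing the checks above:
 * 0            at least one byte of sndbuf space became free,
 * -EPIPE       local error/shutdown, or the peer is done writing,
 * -ECONNRESET  the peer aborted the connection,
 * -EAGAIN      non-blocking socket or send timeout expired,
 * -EINTR/-ERESTARTSYS  a signal arrived (via sock_intr_errno(timeo)).
 */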
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (!atomic_read(&conn->sndbuf_space)) {
			rc = smc_tx_wait_memory(smc, msg->msg_flags);
			if (rc) {
				if (send_done)
					return send_done;
				goto out_err;
			}
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait_memory above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_write(&prep,
			       smc_curs_read(&conn->tx_curs_prep, conn),
			       conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t,
				  copylen, conn->sndbuf_size - tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_size, &prep, copylen);
		smc_curs_write(&conn->tx_curs_prep,
			       smc_curs_read(&prep, conn),
			       conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_size */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
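/* Worked example of the two-chunk copy above (illustrative values):
 * with conn->sndbuf_size == 16 and prep.count == 12, a copylen of 10
 * yields chunk 0 of length 4 at offset 12 (filling the tail of the ring)
 * and chunk 1 of length 6 at offset 0 (the wrapped remainder);
 * chunk_len_sum then equals copylen and the loop exits on iteration 2.
 */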
/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_send_wr *failed_wr = NULL;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		((conn->peer_conn_idx - 1) * conn->peer_rmbe_size) +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}
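/* Illustrative address computation (example values, not from the source):
 * with the peer's RMB registered at dma_addr, conn->peer_conn_idx == 3 and
 * conn->peer_rmbe_size == 64KB, the write targets
 * dma_addr + 2 * 64KB + peer_rmbe_offset, i.e. the third RMB element
 * within the peer's RMB, at the current producer offset.
 */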
/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_size, sent, len);
}
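/* Rough TCP analogy: prod advances like snd_nxt within the peer's RMBE
 * ring, sent tracks how far sndbuf contents have been transmitted, and
 * peer_rmbe_space shrinks like the usable send window until a consumer
 * cursor update from the peer (CDC message) makes space available again.
 */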
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
	size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
	union smc_host_cursor sent, prep, prod, cons;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	struct smc_link_group *lgr = conn->lgr;
	int to_send, rmbespace;
	struct smc_link *link;
	dma_addr_t dma_addr;
	int num_sges;
	int rc;

	/* source: sndbuf */
	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_size, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_write(&prod,
		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	conn->local_tx_ctrl.prod_flags.write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	link = &lgr->lnk[SMC_SINGLE_LINK];
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	dst_len_sum = dst_len;
	src_off = sent.count;
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_size) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_size - sent.count;
	}
	src_len_sum = src_len;
	dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;
			src_off += src_len;
			if (src_off >= conn->sndbuf_size)
				src_off -= conn->sndbuf_size;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int,
				dst_len, conn->sndbuf_size - sent.count);
		src_len_sum = src_len;
	}

	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_write(&conn->local_tx_ctrl.prod,
		       smc_curs_read(&prod, conn),
		       conn);
					/* dst: peer RMBE */
	smc_curs_write(&conn->tx_curs_sent,
		       smc_curs_read(&sent, conn),
		       conn);
					/* src: local sndbuf */

	return 0;
}
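/* Worked example of the nested chunking above (illustrative values):
 * sndbuf_size == 16, sent.count == 14, len == 6, and the free RMBE space
 * unwrapped (prod.wrap != cons.wrap): the destination needs a single chunk
 * of 6 bytes, but the source wraps, so two SGEs are built, 2 bytes at
 * sndbuf offset 14 and 4 bytes at offset 0, and both are posted with a
 * single smc_tx_rdma_write() call.
 */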
/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], &wr_buf,
				   &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED) {
				rc = sock_error(&smc->sk);
				goto out_unlock;
			}
			rc = 0;
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
		}
		goto out_unlock;
	}

	rc = smc_tx_rdma_writes(conn);
	if (rc) {
		smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
				   (struct smc_wr_tx_pend_priv *)pend);
		goto out_unlock;
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
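/* Note on the -EBUSY path above: when no CDC send slot is free, the
 * transmit attempt is not lost but retried from process context by
 * scheduling conn->tx_work after SMC_TX_WORK_DELAY, so callers from
 * IRQ context never block waiting for a free slot.
 */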
/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
static void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
	release_sock(&smc->sk);
}
void smc_tx_consumer_update(struct smc_connection *conn)
{
	union smc_host_cursor cfed, cons;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int to_confirm, rc;

	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_curs_write(&cfed,
		       smc_curs_read(&conn->rx_curs_confirmed, conn),
		       conn);
	to_confirm = smc_curs_diff(conn->rmbe_size, &cfed, &cons);

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((to_confirm > (conn->rmbe_size / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   &wr_buf, &pend);
		if (!rc)
			rc = smc_cdc_msg_send(conn, wr_buf, pend);
		if (rc < 0) {
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}
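/* Worked example of the update condition above (illustrative values):
 * with conn->rmbe_size == 64KB and conn->rmbe_update_limit == 4KB, a
 * consumer cursor update is sent once more than 32KB is unconfirmed, or
 * already above 4KB if the peer signalled write_blocked; this batches
 * CDC messages instead of confirming every consumed byte.
 */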
/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	spin_lock_init(&smc->conn.send_lock);
}
);