net/smc/smc_tx.c (from mirror_ubuntu-eoan-kernel.git on git.proxmox.com)
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	HZ
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */
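/* SMC_TX_WORK_DELAY is used below to retry the TX worker when no CDC send
 * slot is free; SMC_TX_CORK_DELAY defers RDMA writes for corked or MSG_MORE
 * senders while sufficient sndbuf space remains.
 */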
/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}
/* blocks sndbuf producer until at least one byte of free space available
 * or urgent Byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = timeo ? false : true;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}
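/* The internal TCP socket (clcsock) carries the TCP_CORK state that user
 * space set on the SMC socket (cf. smc_setsockopt() in af_smc.c);
 * smc_tx_sendmsg() below consults it to decide whether RDMA writes may be
 * deferred.
 */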
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc) {
				if (send_done)
					return send_done;
				goto out_err;
			}
			continue;
		}
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_write(&prep,
			       smc_curs_read(&conn->tx_curs_prep, conn),
			       conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
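		/* Example of the wrapped case handled by the loop above:
		 * with a 16 KB sndbuf, tx_cnt_prep == 15 KB and
		 * copylen == 2 KB, the 1st chunk copies 1 KB at offset 15 KB
		 * (up to the ring end) and the 2nd chunk copies the
		 * remaining 1 KB at offset 0.
		 */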
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_write(&conn->tx_curs_prep,
			       smc_curs_read(&prep, conn),
			       conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
						(conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
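/* Note on the corking heuristic above: writes are only deferred while more
 * than half of the sndbuf (sndbuf_desc->len >> 1) is still free, so batching
 * small sends into fewer RDMA writes cannot exhaust the send buffer.
 */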
/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_send_wr *failed_wr = NULL;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
	if (rc) {
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		smc_lgr_terminate(lgr);
	}
	return rc;
}
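/* The remote address and rkey used above come from the rtoken the peer
 * advertised for its RMB during the CLC handshake; the RDMA WRITE therefore
 * places data directly into the peer's receive buffer without involving the
 * peer's CPU.
 */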
/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
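/* Cursors are (wrap, count) pairs over a ring buffer: smc_curs_add()
 * advances count modulo the buffer size and bumps the wrap sequence on
 * overflow. Example: in a 64 KB RMBE, advancing a cursor at count 60 KB by
 * 8 KB yields count 4 KB with wrap incremented.
 */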
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
	size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
	union smc_host_cursor sent, prep, prod, cons;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	struct smc_link_group *lgr = conn->lgr;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	struct smc_link *link;
	dma_addr_t dma_addr;
	int num_sges;
	int rc;

	/* source: sndbuf */
	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_write(&prod,
		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);
	/* initialize variables for first iteration of subsequent nested loop */
	link = &lgr->lnk[SMC_SINGLE_LINK];
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	dst_len_sum = dst_len;
	src_off = sent.count;
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}
	src_len_sum = src_len;
	dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int,
				dst_len, conn->sndbuf_desc->len - sent.count);
		src_len_sum = src_len;
	}

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_write(&conn->local_tx_ctrl.prod,
		       smc_curs_read(&prod, conn),
		       conn);
						/* dst: peer RMBE */
	smc_curs_write(&conn->tx_curs_sent,
		       smc_curs_read(&sent, conn),
		       conn);
						/* src: local sndbuf */

	return 0;
}
/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED) {
				rc = sock_error(&smc->sk);
				goto out_unlock;
			}
			rc = 0;
			if (conn->alert_token_local) /* connection healthy */
				mod_delayed_work(system_wq, &conn->tx_work,
						 SMC_TX_WORK_DELAY);
		}
		goto out_unlock;
	}

	if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
		rc = smc_tx_rdma_writes(conn);
		if (rc) {
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	pflags = &conn->local_tx_ctrl.prod_flags;
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
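/* smc_cdc_msg_send() transmits the Connection Data Control (CDC) message
 * carrying the updated producer cursor, so the peer learns how much new data
 * the preceding RDMA writes placed into its RMBE.
 */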
/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err ||
	    !conn->alert_token_local ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_curs_write(&cfed,
		       smc_curs_read(&conn->rx_curs_confirmed, conn),
		       conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_write(&prod,
			       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
			       conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    conn->alert_token_local) { /* connection healthy */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}
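/* A consumer cursor update is only sent when enough bytes were consumed
 * (rmbe_update_limit), when the sender ran out of window (write_blocked or
 * less than half the RMB left), when the peer requested one
 * (cons_curs_upd_req), or when explicitly forced; this batches CDC traffic
 * on the receive side.
 */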
/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}
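/* Presumably invoked from the connection setup paths in af_smc.c once a
 * connection is established; the only send-side property to set up here is
 * the write-space callback, since the remaining send state lives in
 * struct smc_connection.
 */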