// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"

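/* a delay of 0 makes deferred tx work run as soon as possible; the cork
 * delay batches small sends of a corked socket (see smc_tx_sendmsg()) so
 * they can go out as fewer, larger RDMA writes
 */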
#define SMC_TX_WORK_DELAY	0
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space is available
 * or the urgent byte has been consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
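		/* about to sleep: SOCK_NOSPACE set above ensures that
		 * smc_tx_sndbuf_nonfull() calls sk_write_space() and wakes
		 * this wait once sndbuf space is reported free again
		 */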
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

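/* check the cork state of the internal TCP socket (clcsock); a corked
 * TCP socket lets SMC defer its RDMA writes just like MSG_MORE does
 */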
static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
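		/* the sndbuf is a ring buffer; a copy crossing its end wraps
		 * to offset 0 and needs a 2nd chunk, e.g. len 16 with
		 * tx_cnt_prep 12 and copylen 6 copies 4 bytes at offset 12
		 * and the remaining 2 bytes at offset 0
		 */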
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
						(conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}

/***************************** sndbuf consumer *******************************/

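/* transmit path overview: smc_tx_sndbuf_nonempty() dispatches to the SMC-R
 * or SMC-D flavor, which moves data from the local sndbuf into the peer's
 * RMBE (RDMA write resp. ISM write) and then sends a CDC message to
 * advertise the advanced producer cursor to the peer
 */
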
/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc) {
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		smc_lgr_terminate(lgr);
	}
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
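	/* cursors are (wrap, count) pairs; smc_curs_add() advances count
	 * modulo the ring size and bumps wrap on overflow, e.g. size 64K,
	 * count 60K, len 8K yields count 4K with wrap incremented
	 */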
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}

/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

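	/* source sndbuf and destination RMBE are both ring buffers, so a
	 * transfer may wrap on either side: up to 2 destination chunks
	 * (outer loop), each gathered from up to 2 source SGEs (inner loop)
	 */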
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_sge *sge =
			wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = dma_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges,
				       &wr_rdma_buf->wr_tx_rdma[dstchunk]);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}

/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

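	/* same 2x2 wrap handling as the SMC-R flavor, but each chunk is
	 * moved by a synchronous ISM write; dst_off skips the CDC message
	 * header at the start of the peer's RMBE
	 */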
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
	/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}

/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

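	/* reserve a CDC send buffer and the matching RDMA work requests;
	 * -EBUSY below means all send slots are currently in flight
	 */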
	rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			rc = 0;
			if (conn->alert_token_local) /* connection healthy */
				mod_delayed_work(system_wq, &conn->tx_work,
						 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	if (!rc) {
		/* trigger socket release if connection is closing */
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		smc_close_wake_tx_prepared(smc);
	}
	return rc;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err ||
	    !conn->alert_token_local ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

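	/* to_confirm = bytes consumed locally but not yet confirmed to the
	 * peer; a consumer cursor update is sent if the peer requested one,
	 * the caller forces it, or the unconfirmed amount exceeds
	 * rmbe_update_limit while the peer's view of free space runs low
	 */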
	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    conn->alert_token_local) { /* connection healthy */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}