/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage RMBE
 * copy new RMBE data into user space
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"

/* callback implementation for sk.sk_data_ready()
 * to wake up rcvbuf consumers that blocked with smc_rx_wait_data().
 * indirectly called by smc_cdc_msg_recv_action().
 */
static void smc_rx_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	/* derived from sock_def_readable() */
	/* called already in smc_listen_work() */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    (sk->sk_state == SMC_CLOSED))
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	rcu_read_unlock();
}

/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
 * @smc    smc socket
 * @timeo  pointer to max jiffies to wait, pointer to value 0 for no timeout
 * Returns:
 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
 */
static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int rc;

	if (atomic_read(&conn->bytes_to_rcv))
		return 1;
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
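	/* sleep until data arrives, an error or shutdown is flagged, the
	 * peer signals close/senddone, or the timeout expires
	 */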
	rc = sk_wait_event(sk, timeo,
			   sk->sk_err ||
			   sk->sk_shutdown & RCV_SHUTDOWN ||
			   sock_flag(sk, SOCK_DONE) ||
			   atomic_read(&conn->bytes_to_rcv) ||
			   smc_cdc_rxed_any_close_or_senddone(conn),
			   &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	return rc;
}

/* rcvbuf consumer: main API called by socket layer.
 * called under sk lock.
 */
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
		   int flags)
{
	size_t copylen, read_done = 0, read_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons;
	int readable, chunk;
	char *rcvbuf_base;
	struct sock *sk;
	long timeo;
	int target;		/* Read at least this many bytes */
	int rc;

	if (unlikely(flags & MSG_ERRQUEUE))
		return -EINVAL; /* future work for sk.sk_family == AF_SMC */
	if (flags & MSG_OOB)
		return -EINVAL; /* future work */

	sk = &smc->sk;
	if (sk->sk_state == SMC_LISTEN)
		return -ENOTCONN;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	msg->msg_namelen = 0;
	/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
	rcvbuf_base = conn->rmb_desc->cpu_addr;

	do { /* while (read_remaining) */
		if (read_done >= target)
			break;

		if (atomic_read(&conn->bytes_to_rcv))
			goto copy;

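		/* no data ready yet: decide whether to return what we
		 * already have or to report an error/EOF condition
		 */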
		if (read_done) {
			if (sk->sk_err ||
			    sk->sk_state == SMC_CLOSED ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    smc_cdc_rxed_any_close_or_senddone(conn) ||
			    conn->local_tx_ctrl.conn_state_flags.
			    peer_conn_abort)
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				read_done = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN ||
			    smc_cdc_rxed_any_close_or_senddone(conn) ||
			    conn->local_tx_ctrl.conn_state_flags.
			    peer_conn_abort)
				break;
			if (sk->sk_state == SMC_CLOSED) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from a never-connected socket.
					 */
					read_done = -ENOTCONN;
					break;
				}
				break;
			}
			if (signal_pending(current)) {
				read_done = sock_intr_errno(timeo);
				break;
			}
		}

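		/* nothing to read and no reason to stop: block until the
		 * recv tasklet posts new data or the timeout expires
		 */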
		if (!atomic_read(&conn->bytes_to_rcv)) {
			smc_rx_wait_data(smc, &timeo);
			continue;
		}

copy:
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_rx_wait_data above */
		readable = atomic_read(&conn->bytes_to_rcv);
		/* not more than what user space asked for */
		copylen = min_t(size_t, read_remaining, readable);
		smc_curs_write(&cons,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		/* determine chunks where to read from rcvbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t,
				  copylen, conn->rmbe_size - cons.count);
		chunk_len_sum = chunk_len;
		chunk_off = cons.count;
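		/* the rcvbuf is a ring buffer: a read that wraps past its
		 * end is split into a tail chunk and a head chunk, hence at
		 * most two loop iterations
		 */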
		for (chunk = 0; chunk < 2; chunk++) {
			if (!(flags & MSG_TRUNC)) {
				rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off,
						   chunk_len);
				if (rc) {
					if (!read_done)
						read_done = -EFAULT;
					goto out;
				}
			}
			read_remaining -= chunk_len;
			read_done += chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in recv ring buffer */
		}

		/* update cursors */
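		/* MSG_PEEK leaves cursors and bytes_to_rcv untouched so the
		 * same data can be received again
		 */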
		if (!(flags & MSG_PEEK)) {
			smc_curs_add(conn->rmbe_size, &cons, copylen);
			/* increased in recv tasklet smc_cdc_msg_rcv() */
			smp_mb__before_atomic();
			atomic_sub(copylen, &conn->bytes_to_rcv);
			/* guarantee 0 <= bytes_to_rcv <= rmbe_size */
			smp_mb__after_atomic();
			smc_curs_write(&conn->local_tx_ctrl.cons,
				       smc_curs_read(&cons, conn),
				       conn);
			/* send consumer cursor update if required */
			/* similar to advertising new TCP rcv_wnd if required */
			smc_tx_consumer_update(conn);
		}
	} while (read_remaining);
out:
	return read_done;
}

/* Initialize receive properties on connection establishment. NB: not __init! */
void smc_rx_init(struct smc_sock *smc)
{
	smc->sk.sk_data_ready = smc_rx_data_ready;
}