/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/export.h>

#include "rds.h"

/*
 * All of connection management is simplified by serializing it through
 * work queues that execute in a connection managing thread.
 *
 * TCP wants to send acks through sendpage() in response to data_ready(),
 * but it needs a process context to do so.
 *
 * The receive paths need to allocate but can't drop packets (!) so we have
 * a thread around to block allocating if the receive fast path sees an
 * allocation failure.
 */

/* Grand Unified Theory of connection life cycle:
 * At any point in time, the connection can be in one of these states:
 * DOWN, CONNECTING, UP, DISCONNECTING, ERROR
 *
 * The following transitions are possible:
 * ANY		 -> ERROR
 * UP		 -> DISCONNECTING
 * ERROR	 -> DISCONNECTING
 * DISCONNECTING -> DOWN
 * DOWN		 -> CONNECTING
 * CONNECTING	 -> UP
 *
 * Transition to state DISCONNECTING/DOWN:
 *  - Inside the shutdown worker; synchronizes with xmit path
 *    through RDS_IN_XMIT, and with connection management callbacks
 *    via c_cm_lock.
 *
 *  For receive callbacks, we rely on the underlying transport
 *  (TCP, IB/RDMA) to provide the necessary synchronisation.
 */
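/*
 * A minimal sketch of how the transitions above are attempted: in
 * current trees rds_conn_path_transition() (declared in rds.h) is
 * roughly a compare-and-swap on the path state, i.e. something like
 *
 *	atomic_cmpxchg(&cp->cp_state, curr, new) == curr
 *
 * so a transition only succeeds if the path is still in the expected
 * "curr" state, which is what lets the workers below race safely.
 */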
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);

void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
{
	if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
		printk(KERN_WARNING "%s: Cannot transition to state UP, "
				"current state is %d\n",
				__func__,
				atomic_read(&cp->cp_state));
		rds_conn_path_drop(cp);
		return;
	}

	rdsdebug("conn %p for %pI4 to %pI4 complete\n",
		 cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);

	cp->cp_reconnect_jiffies = 0;
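	/* Bit 0 of c_map_queued marks the congestion map as needing
	 * (re)transmission; the send worker queued just below picks it
	 * up once the path is up.
	 */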
	set_bit(0, &cp->cp_conn->c_map_queued);
	queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
	queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
}
EXPORT_SYMBOL_GPL(rds_connect_path_complete);

void rds_connect_complete(struct rds_connection *conn)
{
	rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING);
}
EXPORT_SYMBOL_GPL(rds_connect_complete);

/*
 * This random exponential backoff is relied on to eventually resolve racing
 * connects.
 *
 * If connect attempts race then both parties drop both connections and come
 * here to wait for a random amount of time before trying again.  Eventually
 * the backoff range will be so much greater than the time it takes to
 * establish a connection that one of the pair will establish the connection
 * before the other's random delay fires.
 *
 * Connection attempts that arrive while a connection is already established
 * are also considered to be racing connects.  This lets a connection from
 * a rebooted machine replace an existing stale connection before the transport
 * notices that the connection has failed.
 *
 * We should *always* start with a random backoff; otherwise a broken connection
 * will always take several iterations to be re-established.
 */
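/*
 * Illustration of the resulting schedule (the 1s/30s figures below are
 * hypothetical min/max values, not the sysctl defaults): the first retry
 * is queued immediately, and each later retry is delayed by a value drawn
 * uniformly from [0, 1s), then [0, 2s), [0, 4s), ... with the window
 * doubling until it is capped at [0, 30s).
 */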
void rds_queue_reconnect(struct rds_conn_path *cp)
{
	unsigned long rand;
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
		 conn, &conn->c_laddr, &conn->c_faddr,
		 cp->cp_reconnect_jiffies);

	set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
	if (cp->cp_reconnect_jiffies == 0) {
		cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
		return;
	}

	get_random_bytes(&rand, sizeof(rand));
	rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
		 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
		 conn, &conn->c_laddr, &conn->c_faddr);
	queue_delayed_work(rds_wq, &cp->cp_conn_w,
			   rand % cp->cp_reconnect_jiffies);

	cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
				       rds_sysctl_reconnect_max_jiffies);
}

void rds_connect_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_conn_w.work);
	struct rds_connection *conn = cp->cp_conn;
	int ret;

	clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		ret = conn->c_trans->conn_connect(conn);
		rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
			 conn, &conn->c_laddr, &conn->c_faddr, ret);

		if (ret) {
			if (rds_conn_path_transition(cp,
						     RDS_CONN_CONNECTING,
						     RDS_CONN_DOWN))
				rds_queue_reconnect(cp);
			else
				rds_conn_path_error(cp,
						    "RDS: connect failed\n");
		}
	}
}

void rds_send_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_send_w.work);
	int ret;

	if (rds_conn_path_state(cp) == RDS_CONN_UP) {
		clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags);
		ret = rds_send_xmit(cp);
		cond_resched();
		rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_send_immediate_retry);
			queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_send_delayed_retry);
			queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
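			/* fall through - the default case below just breaks */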
		default:
			break;
		}
	}
}

void rds_recv_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_recv_w.work);
	int ret;

	if (rds_conn_path_state(cp) == RDS_CONN_UP) {
		ret = cp->cp_conn->c_trans->recv(cp->cp_conn);
		rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_recv_immediate_retry);
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_recv_delayed_retry);
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
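			/* fall through - the default case below just breaks */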
		default:
			break;
		}
	}
}

void rds_shutdown_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_down_w);

	rds_conn_shutdown(cp->cp_conn);
}

void rds_threads_exit(void)
{
	destroy_workqueue(rds_wq);
}

int rds_threads_init(void)
{
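	/* A single-threaded workqueue keeps all of the connection
	 * management work in this file serialized, per the comment at
	 * the top of the file.
	 */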
	rds_wq = create_singlethread_workqueue("krdsd");
	if (!rds_wq)
		return -ENOMEM;

	return 0;
}