/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"

static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;

module_param(rds_ib_mr_1m_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
module_param(rds_ib_mr_8k_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_8k_pool_size, " Max number of 8K mr per HCA");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");

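/*
 * Usage sketch: assuming this file is built into rds_rdma.ko (per the
 * net/rds Makefile), the parameters above can be tuned at load time, e.g.
 *
 *	modprobe rds_rdma rds_ib_retry_count=7
 *
 * The 0444 permissions make them read-only afterwards, visible under
 * /sys/module/rds_rdma/parameters/.
 */
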
/*
 * we have a clumsy combination of RCU and a rwsem protecting this list
 * because it is used both in the get_mr fast path and while blocking in
 * the FMR flushing path.
 */
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;

/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);

static void rds_ib_nodev_connect(void)
{
	struct rds_ib_connection *ic;

	spin_lock(&ib_nodev_conns_lock);
	list_for_each_entry(ic, &ib_nodev_conns, ib_node)
		rds_conn_connect_if_down(ic->conn);
	spin_unlock(&ib_nodev_conns_lock);
}

static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_connection *ic;
	unsigned long flags;

	spin_lock_irqsave(&rds_ibdev->spinlock, flags);
	list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
		rds_conn_drop(ic->conn);
	spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}

/*
 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
 * from interrupt context so we push freeing off into a work struct in krdsd.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
	struct rds_ib_ipaddr *i_ipaddr, *i_next;
	struct rds_ib_device *rds_ibdev = container_of(work,
					struct rds_ib_device, free_work);

	if (rds_ibdev->mr_8k_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
	if (rds_ibdev->mr_1m_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
	if (rds_ibdev->pd)
		ib_dealloc_pd(rds_ibdev->pd);

	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
		list_del(&i_ipaddr->list);
		kfree(i_ipaddr);
	}

	kfree(rds_ibdev->vector_load);

	kfree(rds_ibdev);
}

void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
	BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);
	if (refcount_dec_and_test(&rds_ibdev->refcount))
		queue_work(rds_wq, &rds_ibdev->free_work);
}

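/*
 * Reference lifecycle of rds_ib_add_one(), as read from the code below:
 * the device is allocated with a refcount of 1, then one extra reference
 * is taken for the rds_ib_devices list and one for the ib client_data
 * pointer. Both the success and error paths fall through to put_dev,
 * which drops the initial allocation reference, so a successfully added
 * device ends up with a refcount of 2.
 */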
static void rds_ib_add_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	/* Only handle IB (no iWARP) devices */
	if (device->node_type != RDMA_NODE_IB_CA)
		return;

	rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
				 ibdev_to_node(device));
	if (!rds_ibdev)
		return;

	spin_lock_init(&rds_ibdev->spinlock);
	refcount_set(&rds_ibdev->refcount, 1);
	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

	rds_ibdev->max_wrs = device->attrs.max_qp_wr;
	rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);

	rds_ibdev->has_fr = (device->attrs.device_cap_flags &
			     IB_DEVICE_MEM_MGT_EXTENSIONS);
	rds_ibdev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			      device->map_phys_fmr && device->unmap_fmr);
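	/*
	 * Prefer FMR when the device supports it; fast registration
	 * (FRMR) is used only on devices without FMR support.
	 */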
	rds_ibdev->use_fastreg = (rds_ibdev->has_fr && !rds_ibdev->has_fmr);

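	/*
	 * ?: with an omitted middle operand is the GNU "elvis" extension:
	 * use the device's reported remap limit, or fall back to 32 if it
	 * reports none.
	 */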
	rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr ?: 32;
	rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
		min_t(unsigned int, (device->attrs.max_mr / 2),
		      rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size;

	rds_ibdev->max_8k_mrs = device->attrs.max_mr ?
		min_t(unsigned int, ((device->attrs.max_mr / 2) * RDS_MR_8K_SCALE),
		      rds_ib_mr_8k_pool_size) : rds_ib_mr_8k_pool_size;

	rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
	rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;

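	/*
	 * vector_load tracks how many completion queues have been assigned
	 * to each completion vector, so that new CQs can be spread across
	 * the available vectors (see the vector selection in ib_cm.c).
	 */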
	rds_ibdev->vector_load = kcalloc(device->num_comp_vectors, sizeof(int),
					 GFP_KERNEL);
	if (!rds_ibdev->vector_load) {
		pr_err("RDS/IB: %s failed to allocate vector memory\n",
			__func__);
		goto put_dev;
	}

	rds_ibdev->dev = device;
	rds_ibdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(rds_ibdev->pd)) {
		rds_ibdev->pd = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_1m_pool =
		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
	if (IS_ERR(rds_ibdev->mr_1m_pool)) {
		rds_ibdev->mr_1m_pool = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_8k_pool =
		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
	if (IS_ERR(rds_ibdev->mr_8k_pool)) {
		rds_ibdev->mr_8k_pool = NULL;
		goto put_dev;
	}

	rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
		 device->attrs.max_mr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
		 rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_mrs,
		 rds_ibdev->max_8k_mrs);

	pr_info("RDS/IB: %s: %s supported and preferred\n",
		device->name,
		rds_ibdev->use_fastreg ? "FRMR" : "FMR");

	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
	INIT_LIST_HEAD(&rds_ibdev->conn_list);

	down_write(&rds_ib_devices_lock);
	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
	up_write(&rds_ib_devices_lock);
	refcount_inc(&rds_ibdev->refcount);

	ib_set_client_data(device, &rds_ib_client, rds_ibdev);
	refcount_inc(&rds_ibdev->refcount);

	rds_ib_nodev_connect();

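	/*
	 * Both the success and error paths land here: this put drops the
	 * initial reference taken at allocation, leaving only the
	 * devices-list and client_data references on success, and freeing
	 * the device entirely on failure.
	 */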
put_dev:
	rds_ib_dev_put(rds_ibdev);
}

/*
 * New connections use this to find the device to associate with the
 * connection. It's not in the fast path so we're not concerned about the
 * performance of the IB call. (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period. The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if it's arriving on a
 * device that is in the process of being removed.
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rcu_read_lock();
	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (rds_ibdev)
		refcount_inc(&rds_ibdev->refcount);
	rcu_read_unlock();
	return rds_ibdev;
}
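
/*
 * Illustrative caller pattern (a sketch, not code from this file): every
 * successful lookup must be balanced with rds_ib_dev_put() once the
 * caller is done with the device:
 *
 *	rds_ibdev = rds_ib_get_client_data(cm_id->device);
 *	if (!rds_ibdev)
 *		return -ENODEV;
 *	... use rds_ibdev ...
 *	rds_ib_dev_put(rds_ibdev);
 */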

/*
 * The IB stack is letting us know that a device is going away. This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the pci function, for example.
 *
 * This can be called at any time and can be racing with any other RDS path.
 */
static void rds_ib_remove_one(struct ib_device *device, void *client_data)
{
	struct rds_ib_device *rds_ibdev = client_data;

	if (!rds_ibdev)
		return;

	rds_ib_dev_shutdown(rds_ibdev);

	/* stop connection attempts from getting a reference to this device. */
	ib_set_client_data(device, &rds_ib_client, NULL);

	down_write(&rds_ib_devices_lock);
	list_del_rcu(&rds_ibdev->list);
	up_write(&rds_ib_devices_lock);

	/*
	 * This synchronize_rcu() waits for readers of both the ib
	 * client data and the devices list to finish before we drop
	 * both of those references.
	 */
	synchronize_rcu();
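	/* One put for the devices-list reference, one for client_data. */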
	rds_ib_dev_put(rds_ibdev);
	rds_ib_dev_put(rds_ibdev);
}

struct ib_client rds_ib_client = {
	.name   = "rds_ib",
	.add    = rds_ib_add_one,
	.remove = rds_ib_remove_one
};

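/*
 * Visitor for rds_for_each_conn_info(): fills in *buffer and returns 1
 * when the connection should be copied out to userspace, 0 to have it
 * skipped.
 */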
static int rds_ib_conn_info_visitor(struct rds_connection *conn,
				    void *buffer)
{
	struct rds_info_rdma_connection *iinfo = buffer;
	struct rds_ib_connection *ic;

	/* We will only ever look at IB transports */
	if (conn->c_trans != &rds_ib_transport)
		return 0;

	iinfo->src_addr = conn->c_laddr;
	iinfo->dst_addr = conn->c_faddr;

	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
	if (rds_conn_state(conn) == RDS_CONN_UP) {
		struct rds_ib_device *rds_ibdev;
		struct rdma_dev_addr *dev_addr;

		ic = conn->c_transport_data;
		dev_addr = &ic->i_cm_id->route.addr.dev_addr;

		rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
		rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);

		rds_ibdev = ic->rds_ibdev;
		iinfo->max_send_wr = ic->i_send_ring.w_nr;
		iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
		iinfo->max_send_sge = rds_ibdev->max_sge;
		rds_ib_get_mr_info(rds_ibdev, iinfo);
	}
	return 1;
}

static void rds_ib_ic_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_ib_conn_info_visitor,
				sizeof(struct rds_info_rdma_connection));
}

/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible. Sending and
 * receiving should be device-agnostic. Transports would try to maintain
 * connections between peers who have messages queued. Userspace would be
 * allowed to influence which paths have priority. We could call userspace
 * asserting this policy "routing".
 */
static int rds_ib_laddr_check(struct net *net, __be32 addr)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin;

	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
	cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;

	/* rdma_bind_addr will only succeed for IB & iWARP devices */
	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	/* due to this, we will claim to support iWARP devices unless we
	   check node_type. */
	if (ret || !cm_id->device ||
	    cm_id->device->node_type != RDMA_NODE_IB_CA)
		ret = -EADDRNOTAVAIL;

	rdsdebug("addr %pI4 ret %d node type %d\n",
		 &addr, ret,
		 cm_id->device ? cm_id->device->node_type : -1);

	rdma_destroy_id(cm_id);

	return ret;
}

static void rds_ib_unregister_client(void)
{
	ib_unregister_client(&rds_ib_client);
	/* wait for rds_ib_dev_free() to complete */
	flush_workqueue(rds_wq);
}

void rds_ib_exit(void)
{
	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
	rds_ib_unregister_client();
	rds_ib_destroy_nodev_conns();
	rds_ib_sysctl_exit();
	rds_ib_recv_exit();
	rds_trans_unregister(&rds_ib_transport);
	rds_ib_mr_exit();
}

struct rds_transport rds_ib_transport = {
	.laddr_check		= rds_ib_laddr_check,
	.xmit_path_complete	= rds_ib_xmit_path_complete,
	.xmit			= rds_ib_xmit,
	.xmit_rdma		= rds_ib_xmit_rdma,
	.xmit_atomic		= rds_ib_xmit_atomic,
	.recv_path		= rds_ib_recv_path,
	.conn_alloc		= rds_ib_conn_alloc,
	.conn_free		= rds_ib_conn_free,
	.conn_path_connect	= rds_ib_conn_path_connect,
	.conn_path_shutdown	= rds_ib_conn_path_shutdown,
	.inc_copy_to_user	= rds_ib_inc_copy_to_user,
	.inc_free		= rds_ib_inc_free,
	.cm_initiate_connect	= rds_ib_cm_initiate_connect,
	.cm_handle_connect	= rds_ib_cm_handle_connect,
	.cm_connect_complete	= rds_ib_cm_connect_complete,
	.stats_info_copy	= rds_ib_stats_info_copy,
	.exit			= rds_ib_exit,
	.get_mr			= rds_ib_get_mr,
	.sync_mr		= rds_ib_sync_mr,
	.free_mr		= rds_ib_free_mr,
	.flush_mrs		= rds_ib_flush_mrs,
	.t_owner		= THIS_MODULE,
	.t_name			= "infiniband",
	.t_type			= RDS_TRANS_IB
};
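
/*
 * The RDS core dispatches transport-specific work through the hooks
 * above once rds_trans_register() runs in rds_ib_init() below.
 */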

int rds_ib_init(void)
{
	int ret;

	INIT_LIST_HEAD(&rds_ib_devices);

	ret = rds_ib_mr_init();
	if (ret)
		goto out;

	ret = ib_register_client(&rds_ib_client);
	if (ret)
		goto out_mr_exit;

	ret = rds_ib_sysctl_init();
	if (ret)
		goto out_ibreg;

	ret = rds_ib_recv_init();
	if (ret)
		goto out_sysctl;

	rds_trans_register(&rds_ib_transport);

	rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);

	goto out;

out_sysctl:
	rds_ib_sysctl_exit();
out_ibreg:
	rds_ib_unregister_client();
out_mr_exit:
	rds_ib_mr_exit();
out:
	return ret;
}

MODULE_LICENSE("GPL");