IB/core: Add might_sleep() annotation to ib_init_ah_from_wc()
drivers/infiniband/core/verbs.c
1da177e4
LT
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
2a1d9b7f 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
33b9b3ee 8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
1da177e4
LT
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
1da177e4
LT
37 */
38
39#include <linux/errno.h>
40#include <linux/err.h>
b108d976 41#include <linux/export.h>
8c65b4a6 42#include <linux/string.h>
0e0ec7e0 43#include <linux/slab.h>
dbf727de
MB
44#include <linux/in.h>
45#include <linux/in6.h>
46#include <net/addrconf.h>
d291f1a6 47#include <linux/security.h>
1da177e4 48
a4d61e84
RD
49#include <rdma/ib_verbs.h>
50#include <rdma/ib_cache.h>
dd5f03be 51#include <rdma/ib_addr.h>
a060b562 52#include <rdma/rw.h>
1da177e4 53
ed4c54e5 54#include "core_priv.h"
1da177e4 55
2b1b5b60
SG
56static const char * const ib_events[] = {
57 [IB_EVENT_CQ_ERR] = "CQ error",
58 [IB_EVENT_QP_FATAL] = "QP fatal error",
59 [IB_EVENT_QP_REQ_ERR] = "QP request error",
60 [IB_EVENT_QP_ACCESS_ERR] = "QP access error",
61 [IB_EVENT_COMM_EST] = "communication established",
62 [IB_EVENT_SQ_DRAINED] = "send queue drained",
63 [IB_EVENT_PATH_MIG] = "path migration successful",
64 [IB_EVENT_PATH_MIG_ERR] = "path migration error",
65 [IB_EVENT_DEVICE_FATAL] = "device fatal error",
66 [IB_EVENT_PORT_ACTIVE] = "port active",
67 [IB_EVENT_PORT_ERR] = "port error",
68 [IB_EVENT_LID_CHANGE] = "LID change",
69 [IB_EVENT_PKEY_CHANGE] = "P_key change",
70 [IB_EVENT_SM_CHANGE] = "SM change",
71 [IB_EVENT_SRQ_ERR] = "SRQ error",
72 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
73 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
74 [IB_EVENT_CLIENT_REREGISTER] = "client reregister",
75 [IB_EVENT_GID_CHANGE] = "GID changed",
76};
77
db7489e0 78const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
2b1b5b60
SG
79{
80 size_t index = event;
81
82 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
83 ib_events[index] : "unrecognized event";
84}
85EXPORT_SYMBOL(ib_event_msg);
86
87static const char * const wc_statuses[] = {
88 [IB_WC_SUCCESS] = "success",
89 [IB_WC_LOC_LEN_ERR] = "local length error",
90 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
91 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
92 [IB_WC_LOC_PROT_ERR] = "local protection error",
93 [IB_WC_WR_FLUSH_ERR] = "WR flushed",
94 [IB_WC_MW_BIND_ERR] = "memory management operation error",
95 [IB_WC_BAD_RESP_ERR] = "bad response error",
96 [IB_WC_LOC_ACCESS_ERR] = "local access error",
97 [IB_WC_REM_INV_REQ_ERR] = "invalid request error",
98 [IB_WC_REM_ACCESS_ERR] = "remote access error",
99 [IB_WC_REM_OP_ERR] = "remote operation error",
100 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
101 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
102 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
103 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
104 [IB_WC_REM_ABORT_ERR] = "operation aborted",
105 [IB_WC_INV_EECN_ERR] = "invalid EE context number",
106 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
107 [IB_WC_FATAL_ERR] = "fatal error",
108 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
109 [IB_WC_GENERAL_ERR] = "general error",
110};
111
db7489e0 112const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
2b1b5b60
SG
113{
114 size_t index = status;
115
116 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
117 wc_statuses[index] : "unrecognized status";
118}
119EXPORT_SYMBOL(ib_wc_status_msg);
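/*
 * Illustrative sketch (not part of the original file): a consumer's poll
 * loop or completion handler can use ib_wc_status_msg() above to log a
 * failed work completion in readable form. The function name is a
 * hypothetical example.
 */
static void example_log_bad_wc(const struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		pr_err("wr_id %llu failed: %s (%d)\n",
		       (unsigned long long)wc->wr_id,
		       ib_wc_status_msg(wc->status), wc->status);
}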
120
8385fd84 121__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
bf6a9e31
JM
122{
123 switch (rate) {
124 case IB_RATE_2_5_GBPS: return 1;
125 case IB_RATE_5_GBPS: return 2;
126 case IB_RATE_10_GBPS: return 4;
127 case IB_RATE_20_GBPS: return 8;
128 case IB_RATE_30_GBPS: return 12;
129 case IB_RATE_40_GBPS: return 16;
130 case IB_RATE_60_GBPS: return 24;
131 case IB_RATE_80_GBPS: return 32;
132 case IB_RATE_120_GBPS: return 48;
133 default: return -1;
134 }
135}
136EXPORT_SYMBOL(ib_rate_to_mult);
137
8385fd84 138__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
bf6a9e31
JM
139{
140 switch (mult) {
141 case 1: return IB_RATE_2_5_GBPS;
142 case 2: return IB_RATE_5_GBPS;
143 case 4: return IB_RATE_10_GBPS;
144 case 8: return IB_RATE_20_GBPS;
145 case 12: return IB_RATE_30_GBPS;
146 case 16: return IB_RATE_40_GBPS;
147 case 24: return IB_RATE_60_GBPS;
148 case 32: return IB_RATE_80_GBPS;
149 case 48: return IB_RATE_120_GBPS;
150 default: return IB_RATE_PORT_CURRENT;
151 }
152}
153EXPORT_SYMBOL(mult_to_ib_rate);
154
8385fd84 155__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
71eeba16
MA
156{
157 switch (rate) {
158 case IB_RATE_2_5_GBPS: return 2500;
159 case IB_RATE_5_GBPS: return 5000;
160 case IB_RATE_10_GBPS: return 10000;
161 case IB_RATE_20_GBPS: return 20000;
162 case IB_RATE_30_GBPS: return 30000;
163 case IB_RATE_40_GBPS: return 40000;
164 case IB_RATE_60_GBPS: return 60000;
165 case IB_RATE_80_GBPS: return 80000;
166 case IB_RATE_120_GBPS: return 120000;
167 case IB_RATE_14_GBPS: return 14062;
168 case IB_RATE_56_GBPS: return 56250;
169 case IB_RATE_112_GBPS: return 112500;
170 case IB_RATE_168_GBPS: return 168750;
171 case IB_RATE_25_GBPS: return 25781;
172 case IB_RATE_100_GBPS: return 103125;
173 case IB_RATE_200_GBPS: return 206250;
174 case IB_RATE_300_GBPS: return 309375;
175 default: return -1;
176 }
177}
178EXPORT_SYMBOL(ib_rate_to_mbps);
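/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above are inverses only for the classic 2.5 Gb/s multiples; rates such
 * as IB_RATE_25_GBPS map to -1 and round-trip to IB_RATE_PORT_CURRENT.
 */
static void example_rate_round_trip(void)
{
	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	/* 16 */
	enum ib_rate rate = mult_to_ib_rate(mult);	/* IB_RATE_40_GBPS */

	WARN_ON(rate != IB_RATE_40_GBPS);
}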
179
8385fd84 180__attribute_const__ enum rdma_transport_type
07ebafba
TT
181rdma_node_get_transport(enum rdma_node_type node_type)
182{
cdc596d8
LR
183
184 if (node_type == RDMA_NODE_USNIC)
5db5765e 185 return RDMA_TRANSPORT_USNIC;
cdc596d8 186 if (node_type == RDMA_NODE_USNIC_UDP)
248567f7 187 return RDMA_TRANSPORT_USNIC_UDP;
cdc596d8
LR
188 if (node_type == RDMA_NODE_RNIC)
189 return RDMA_TRANSPORT_IWARP;
190
191 return RDMA_TRANSPORT_IB;
07ebafba
TT
192}
193EXPORT_SYMBOL(rdma_node_get_transport);
194
a3f5adaf
EC
195enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
196{
82901e3e 197 enum rdma_transport_type lt;
a3f5adaf
EC
198 if (device->get_link_layer)
199 return device->get_link_layer(device, port_num);
200
82901e3e
LR
201 lt = rdma_node_get_transport(device->node_type);
202 if (lt == RDMA_TRANSPORT_IB)
a3f5adaf 203 return IB_LINK_LAYER_INFINIBAND;
82901e3e
LR
204
205 return IB_LINK_LAYER_ETHERNET;
a3f5adaf
EC
206}
207EXPORT_SYMBOL(rdma_port_get_link_layer);
208
1da177e4
LT
209/* Protection domains */
210
96249d70
JG
211/**
212 * ib_alloc_pd - Allocates an unused protection domain.
213 * @device: The device on which to allocate the protection domain.
214 *
215 * A protection domain object provides an association between QPs, shared
216 * receive queues, address handles, memory regions, and memory windows.
217 *
218 * Every PD has a local_dma_lkey which can be used as the lkey value for local
219 * memory operations.
220 */
ed082d36
CH
221struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
222 const char *caller)
1da177e4
LT
223{
224 struct ib_pd *pd;
ed082d36 225 int mr_access_flags = 0;
1da177e4 226
b5e81bf5 227 pd = device->alloc_pd(device, NULL, NULL);
96249d70
JG
228 if (IS_ERR(pd))
229 return pd;
1da177e4 230
96249d70
JG
231 pd->device = device;
232 pd->uobject = NULL;
50d46335 233 pd->__internal_mr = NULL;
96249d70 234 atomic_set(&pd->usecnt, 0);
ed082d36 235 pd->flags = flags;
1da177e4 236
86bee4c9 237 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
96249d70 238 pd->local_dma_lkey = device->local_dma_lkey;
ed082d36
CH
239 else
240 mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
241
242 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
243 pr_warn("%s: enabling unsafe global rkey\n", caller);
244 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
245 }
246
247 if (mr_access_flags) {
96249d70
JG
248 struct ib_mr *mr;
249
5ef990f0 250 mr = pd->device->get_dma_mr(pd, mr_access_flags);
96249d70
JG
251 if (IS_ERR(mr)) {
252 ib_dealloc_pd(pd);
5ef990f0 253 return ERR_CAST(mr);
96249d70 254 }
1da177e4 255
5ef990f0
CH
256 mr->device = pd->device;
257 mr->pd = pd;
258 mr->uobject = NULL;
259 mr->need_inval = false;
260
50d46335 261 pd->__internal_mr = mr;
ed082d36
CH
262
263 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
264 pd->local_dma_lkey = pd->__internal_mr->lkey;
265
266 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
267 pd->unsafe_global_rkey = pd->__internal_mr->rkey;
1da177e4 268 }
ed082d36 269
1da177e4
LT
270 return pd;
271}
ed082d36 272EXPORT_SYMBOL(__ib_alloc_pd);
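/*
 * Illustrative sketch (not part of the original file): a kernel ULP
 * normally allocates one PD per device through the ib_alloc_pd() wrapper
 * (which supplies KBUILD_MODNAME as @caller) and frees it with
 * ib_dealloc_pd() once all dependent objects are gone. The function name
 * is a hypothetical example.
 */
static int example_setup_pd(struct ib_device *device, struct ib_pd **pd_out)
{
	struct ib_pd *pd = ib_alloc_pd(device, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);

	*pd_out = pd;
	return 0;
}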
1da177e4 273
7dd78647
JG
274/**
275 * ib_dealloc_pd - Deallocates a protection domain.
276 * @pd: The protection domain to deallocate.
277 *
278 * It is an error to call this function while any resources in the pd still
279 * exist. The caller is responsible for synchronously destroying them and
280 * guaranteeing that no new allocations will happen.
281 */
282void ib_dealloc_pd(struct ib_pd *pd)
1da177e4 283{
7dd78647
JG
284 int ret;
285
50d46335 286 if (pd->__internal_mr) {
5ef990f0 287 ret = pd->device->dereg_mr(pd->__internal_mr);
7dd78647 288 WARN_ON(ret);
50d46335 289 pd->__internal_mr = NULL;
96249d70 290 }
1da177e4 291
7dd78647
JG
292 /* uverbs manipulates usecnt with proper locking, while the kabi
293 requires the caller to guarantee we can't race here. */
294 WARN_ON(atomic_read(&pd->usecnt));
1da177e4 295
7dd78647
JG
296 /* Making dealloc_pd a void return is a WIP, no driver should return
297 an error here. */
298 ret = pd->device->dealloc_pd(pd);
299 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
1da177e4
LT
300}
301EXPORT_SYMBOL(ib_dealloc_pd);
302
303/* Address handles */
304
0a18cfe4 305struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
1da177e4
LT
306{
307 struct ib_ah *ah;
308
477864c8 309 ah = pd->device->create_ah(pd, ah_attr, NULL);
1da177e4
LT
310
311 if (!IS_ERR(ah)) {
b5e81bf5
RD
312 ah->device = pd->device;
313 ah->pd = pd;
314 ah->uobject = NULL;
44c58487 315 ah->type = ah_attr->type;
1da177e4
LT
316 atomic_inc(&pd->usecnt);
317 }
318
319 return ah;
320}
0a18cfe4 321EXPORT_SYMBOL(rdma_create_ah);
1da177e4 322
850d8fd7 323int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
c865f246
SK
324{
325 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
326 struct iphdr ip4h_checked;
327 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
328
329 /* If it's IPv6, the version must be 6, otherwise, the first
330 * 20 bytes (before the IPv4 header) are garbled.
331 */
332 if (ip6h->version != 6)
333 return (ip4h->version == 4) ? 4 : 0;
334 /* version may be 6 or 4 because the first 20 bytes could be garbled */
335
336 /* RoCE v2 requires no options, thus header length
337 * must be 5 words
338 */
339 if (ip4h->ihl != 5)
340 return 6;
341
342 /* Verify checksum.
343 * We can't write on scattered buffers so we need to copy to
344 * temp buffer.
345 */
346 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
347 ip4h_checked.check = 0;
348 ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
349 /* if IPv4 header checksum is OK, believe it */
350 if (ip4h->check == ip4h_checked.check)
351 return 4;
352 return 6;
353}
850d8fd7 354EXPORT_SYMBOL(ib_get_rdma_header_version);
c865f246
SK
355
356static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
357 u8 port_num,
358 const struct ib_grh *grh)
359{
360 int grh_version;
361
362 if (rdma_protocol_ib(device, port_num))
363 return RDMA_NETWORK_IB;
364
850d8fd7 365 grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
c865f246
SK
366
367 if (grh_version == 4)
368 return RDMA_NETWORK_IPV4;
369
370 if (grh->next_hdr == IPPROTO_UDP)
371 return RDMA_NETWORK_IPV6;
372
373 return RDMA_NETWORK_ROCE_V1;
374}
375
dbf727de
MB
376struct find_gid_index_context {
377 u16 vlan_id;
c865f246 378 enum ib_gid_type gid_type;
dbf727de
MB
379};
380
381static bool find_gid_index(const union ib_gid *gid,
382 const struct ib_gid_attr *gid_attr,
383 void *context)
384{
385 struct find_gid_index_context *ctx =
386 (struct find_gid_index_context *)context;
387
c865f246
SK
388 if (ctx->gid_type != gid_attr->gid_type)
389 return false;
390
dbf727de
MB
391 if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
392 (is_vlan_dev(gid_attr->ndev) &&
393 vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
394 return false;
395
396 return true;
397}
398
399static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
400 u16 vlan_id, const union ib_gid *sgid,
c865f246 401 enum ib_gid_type gid_type,
dbf727de
MB
402 u16 *gid_index)
403{
c865f246
SK
404 struct find_gid_index_context context = {.vlan_id = vlan_id,
405 .gid_type = gid_type};
dbf727de
MB
406
407 return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
408 &context, gid_index);
409}
410
850d8fd7
MS
411int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
412 enum rdma_network_type net_type,
413 union ib_gid *sgid, union ib_gid *dgid)
c865f246
SK
414{
415 struct sockaddr_in src_in;
416 struct sockaddr_in dst_in;
417 __be32 src_saddr, dst_saddr;
418
419 if (!sgid || !dgid)
420 return -EINVAL;
421
422 if (net_type == RDMA_NETWORK_IPV4) {
423 memcpy(&src_in.sin_addr.s_addr,
424 &hdr->roce4grh.saddr, 4);
425 memcpy(&dst_in.sin_addr.s_addr,
426 &hdr->roce4grh.daddr, 4);
427 src_saddr = src_in.sin_addr.s_addr;
428 dst_saddr = dst_in.sin_addr.s_addr;
429 ipv6_addr_set_v4mapped(src_saddr,
430 (struct in6_addr *)sgid);
431 ipv6_addr_set_v4mapped(dst_saddr,
432 (struct in6_addr *)dgid);
433 return 0;
434 } else if (net_type == RDMA_NETWORK_IPV6 ||
435 net_type == RDMA_NETWORK_IB) {
436 *dgid = hdr->ibgrh.dgid;
437 *sgid = hdr->ibgrh.sgid;
438 return 0;
439 } else {
440 return -EINVAL;
441 }
442}
850d8fd7 443EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
c865f246 444
28b5b3a2
GS
445/*
446 * This function creates an AH from the incoming packet.
447 * The incoming packet carries the dgid of the receiving node on which this
448 * code is executing, and its sgid holds the GID of the sender.
449 *
450 * When resolving the destination's MAC address, the arrived dgid is used
451 * as the sgid and the sgid is used as the dgid, because the sgid holds the
452 * destination GID to respond to.
453 *
454 * This is why, when calling rdma_addr_find_l2_eth_by_grh(), the
455 * positions of the dgid and sgid arguments do not match the order of the
456 * parameters.
457 */
73cdaaee
IW
458int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
459 const struct ib_wc *wc, const struct ib_grh *grh,
90898850 460 struct rdma_ah_attr *ah_attr)
513789ed 461{
513789ed
HR
462 u32 flow_class;
463 u16 gid_index;
464 int ret;
c865f246
SK
465 enum rdma_network_type net_type = RDMA_NETWORK_IB;
466 enum ib_gid_type gid_type = IB_GID_TYPE_IB;
c3efe750 467 int hoplimit = 0xff;
c865f246
SK
468 union ib_gid dgid;
469 union ib_gid sgid;
513789ed 470
79364227
RD
471 might_sleep();
472
4e00d694 473 memset(ah_attr, 0, sizeof *ah_attr);
44c58487 474 ah_attr->type = rdma_ah_find_type(device, port_num);
227128fc 475 if (rdma_cap_eth_ah(device, port_num)) {
c865f246
SK
476 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
477 net_type = wc->network_hdr_type;
478 else
479 net_type = ib_get_net_type_by_grh(device, port_num, grh);
480 gid_type = ib_network_to_gid_type(net_type);
481 }
850d8fd7
MS
482 ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
483 &sgid, &dgid);
c865f246
SK
484 if (ret)
485 return ret;
486
487 if (rdma_protocol_roce(device, port_num)) {
20029832 488 int if_index = 0;
dbf727de
MB
489 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
490 wc->vlan_id : 0xffff;
20029832
MB
491 struct net_device *idev;
492 struct net_device *resolved_dev;
dbf727de 493
dd5f03be
MB
494 if (!(wc->wc_flags & IB_WC_GRH))
495 return -EPROTOTYPE;
496
20029832
MB
497 if (!device->get_netdev)
498 return -EOPNOTSUPP;
499
500 idev = device->get_netdev(device, port_num);
501 if (!idev)
502 return -ENODEV;
503
f7f4b23e 504 ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
44c58487 505 ah_attr->roce.dmac,
f7f4b23e
MB
506 wc->wc_flags & IB_WC_WITH_VLAN ?
507 NULL : &vlan_id,
c3efe750 508 &if_index, &hoplimit);
20029832
MB
509 if (ret) {
510 dev_put(idev);
511 return ret;
dd5f03be 512 }
dbf727de 513
20029832 514 resolved_dev = dev_get_by_index(&init_net, if_index);
20029832
MB
515 rcu_read_lock();
516 if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
517 resolved_dev))
518 ret = -EHOSTUNREACH;
519 rcu_read_unlock();
520 dev_put(idev);
521 dev_put(resolved_dev);
522 if (ret)
523 return ret;
524
dbf727de 525 ret = get_sgid_index_from_eth(device, port_num, vlan_id,
c865f246 526 &dgid, gid_type, &gid_index);
dbf727de
MB
527 if (ret)
528 return ret;
dd5f03be
MB
529 }
530
d8966fcd
DC
531 rdma_ah_set_dlid(ah_attr, wc->slid);
532 rdma_ah_set_sl(ah_attr, wc->sl);
533 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
534 rdma_ah_set_port_num(ah_attr, port_num);
513789ed
HR
535
536 if (wc->wc_flags & IB_WC_GRH) {
dbf727de 537 if (!rdma_cap_eth_ah(device, port_num)) {
b3556005
EC
538 if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
539 ret = ib_find_cached_gid_by_port(device, &dgid,
540 IB_GID_TYPE_IB,
541 port_num, NULL,
542 &gid_index);
543 if (ret)
544 return ret;
545 } else {
546 gid_index = 0;
547 }
dbf727de 548 }
513789ed 549
497677ab 550 flow_class = be32_to_cpu(grh->version_tclass_flow);
d8966fcd
DC
551 rdma_ah_set_grh(ah_attr, &sgid,
552 flow_class & 0xFFFFF,
553 (u8)gid_index, hoplimit,
554 (flow_class >> 20) & 0xFF);
555
513789ed 556 }
4e00d694
SH
557 return 0;
558}
559EXPORT_SYMBOL(ib_init_ah_from_wc);
560
73cdaaee
IW
561struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
562 const struct ib_grh *grh, u8 port_num)
4e00d694 563{
90898850 564 struct rdma_ah_attr ah_attr;
4e00d694
SH
565 int ret;
566
567 ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
568 if (ret)
569 return ERR_PTR(ret);
513789ed 570
0a18cfe4 571 return rdma_create_ah(pd, &ah_attr);
513789ed
HR
572}
573EXPORT_SYMBOL(ib_create_ah_from_wc);
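/*
 * Illustrative sketch (not part of the original file): a UD consumer
 * answering a received datagram typically builds the reply AH straight
 * from the work completion, posts its send, and then destroys the AH.
 * Because ib_init_ah_from_wc() may sleep, this must run in process
 * context rather than from an interrupt-driven completion handler. The
 * function name is a hypothetical example.
 */
static struct ib_ah *example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
				      const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);

	if (IS_ERR(ah))
		return ah;

	/* ... post the reply WR using ah, then rdma_destroy_ah(ah) ... */
	return ah;
}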
574
67b985b6 575int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
1da177e4 576{
44c58487
DC
577 if (ah->type != ah_attr->type)
578 return -EINVAL;
579
1da177e4
LT
580 return ah->device->modify_ah ?
581 ah->device->modify_ah(ah, ah_attr) :
582 -ENOSYS;
583}
67b985b6 584EXPORT_SYMBOL(rdma_modify_ah);
1da177e4 585
bfbfd661 586int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
1da177e4
LT
587{
588 return ah->device->query_ah ?
589 ah->device->query_ah(ah, ah_attr) :
590 -ENOSYS;
591}
bfbfd661 592EXPORT_SYMBOL(rdma_query_ah);
1da177e4 593
36523159 594int rdma_destroy_ah(struct ib_ah *ah)
1da177e4
LT
595{
596 struct ib_pd *pd;
597 int ret;
598
599 pd = ah->pd;
600 ret = ah->device->destroy_ah(ah);
601 if (!ret)
602 atomic_dec(&pd->usecnt);
603
604 return ret;
605}
36523159 606EXPORT_SYMBOL(rdma_destroy_ah);
1da177e4 607
d41fcc67
RD
608/* Shared receive queues */
609
610struct ib_srq *ib_create_srq(struct ib_pd *pd,
611 struct ib_srq_init_attr *srq_init_attr)
612{
613 struct ib_srq *srq;
614
615 if (!pd->device->create_srq)
616 return ERR_PTR(-ENOSYS);
617
618 srq = pd->device->create_srq(pd, srq_init_attr, NULL);
619
620 if (!IS_ERR(srq)) {
621 srq->device = pd->device;
622 srq->pd = pd;
623 srq->uobject = NULL;
624 srq->event_handler = srq_init_attr->event_handler;
625 srq->srq_context = srq_init_attr->srq_context;
96104eda 626 srq->srq_type = srq_init_attr->srq_type;
1a56ff6d
AK
627 if (ib_srq_has_cq(srq->srq_type)) {
628 srq->ext.cq = srq_init_attr->ext.cq;
629 atomic_inc(&srq->ext.cq->usecnt);
630 }
418d5130
SH
631 if (srq->srq_type == IB_SRQT_XRC) {
632 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
418d5130 633 atomic_inc(&srq->ext.xrc.xrcd->usecnt);
418d5130 634 }
d41fcc67
RD
635 atomic_inc(&pd->usecnt);
636 atomic_set(&srq->usecnt, 0);
637 }
638
639 return srq;
640}
641EXPORT_SYMBOL(ib_create_srq);
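/*
 * Illustrative sketch (not part of the original file): a basic (non-XRC)
 * SRQ only needs the size limits filled in; srq_type is left as
 * IB_SRQT_BASIC by the zero initializer. The queue sizes are arbitrary
 * example values.
 */
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr srq_attr = {
		.attr = {
			.max_wr	 = 256,	/* receive WRs the SRQ can hold */
			.max_sge = 1,	/* scatter entries per receive */
		},
	};

	return ib_create_srq(pd, &srq_attr);
}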
642
643int ib_modify_srq(struct ib_srq *srq,
644 struct ib_srq_attr *srq_attr,
645 enum ib_srq_attr_mask srq_attr_mask)
646{
7ce5eacb
DB
647 return srq->device->modify_srq ?
648 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
649 -ENOSYS;
d41fcc67
RD
650}
651EXPORT_SYMBOL(ib_modify_srq);
652
653int ib_query_srq(struct ib_srq *srq,
654 struct ib_srq_attr *srq_attr)
655{
656 return srq->device->query_srq ?
657 srq->device->query_srq(srq, srq_attr) : -ENOSYS;
658}
659EXPORT_SYMBOL(ib_query_srq);
660
661int ib_destroy_srq(struct ib_srq *srq)
662{
663 struct ib_pd *pd;
418d5130
SH
664 enum ib_srq_type srq_type;
665 struct ib_xrcd *uninitialized_var(xrcd);
666 struct ib_cq *uninitialized_var(cq);
d41fcc67
RD
667 int ret;
668
669 if (atomic_read(&srq->usecnt))
670 return -EBUSY;
671
672 pd = srq->pd;
418d5130 673 srq_type = srq->srq_type;
1a56ff6d
AK
674 if (ib_srq_has_cq(srq_type))
675 cq = srq->ext.cq;
676 if (srq_type == IB_SRQT_XRC)
418d5130 677 xrcd = srq->ext.xrc.xrcd;
d41fcc67
RD
678
679 ret = srq->device->destroy_srq(srq);
418d5130 680 if (!ret) {
d41fcc67 681 atomic_dec(&pd->usecnt);
1a56ff6d 682 if (srq_type == IB_SRQT_XRC)
418d5130 683 atomic_dec(&xrcd->usecnt);
1a56ff6d 684 if (ib_srq_has_cq(srq_type))
418d5130 685 atomic_dec(&cq->usecnt);
418d5130 686 }
d41fcc67
RD
687
688 return ret;
689}
690EXPORT_SYMBOL(ib_destroy_srq);
691
1da177e4
LT
692/* Queue pairs */
693
0e0ec7e0
SH
694static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
695{
696 struct ib_qp *qp = context;
73c40c61 697 unsigned long flags;
0e0ec7e0 698
73c40c61 699 spin_lock_irqsave(&qp->device->event_handler_lock, flags);
0e0ec7e0 700 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
eec9e29f
SP
701 if (event->element.qp->event_handler)
702 event->element.qp->event_handler(event, event->element.qp->qp_context);
73c40c61 703 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
0e0ec7e0
SH
704}
705
d3d72d90
SH
706static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
707{
708 mutex_lock(&xrcd->tgt_qp_mutex);
709 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
710 mutex_unlock(&xrcd->tgt_qp_mutex);
711}
712
0e0ec7e0
SH
713static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
714 void (*event_handler)(struct ib_event *, void *),
715 void *qp_context)
d3d72d90 716{
0e0ec7e0
SH
717 struct ib_qp *qp;
718 unsigned long flags;
d291f1a6 719 int err;
0e0ec7e0
SH
720
721 qp = kzalloc(sizeof *qp, GFP_KERNEL);
722 if (!qp)
723 return ERR_PTR(-ENOMEM);
724
d291f1a6
DJ
725 qp->real_qp = real_qp;
726 err = ib_open_shared_qp_security(qp, real_qp->device);
727 if (err) {
728 kfree(qp);
729 return ERR_PTR(err);
730 }
731
0e0ec7e0
SH
732 qp->real_qp = real_qp;
733 atomic_inc(&real_qp->usecnt);
734 qp->device = real_qp->device;
735 qp->event_handler = event_handler;
736 qp->qp_context = qp_context;
737 qp->qp_num = real_qp->qp_num;
738 qp->qp_type = real_qp->qp_type;
739
740 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
741 list_add(&qp->open_list, &real_qp->open_list);
742 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
743
744 return qp;
745}
746
747struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
748 struct ib_qp_open_attr *qp_open_attr)
749{
750 struct ib_qp *qp, *real_qp;
751
752 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
753 return ERR_PTR(-EINVAL);
754
755 qp = ERR_PTR(-EINVAL);
d3d72d90 756 mutex_lock(&xrcd->tgt_qp_mutex);
0e0ec7e0
SH
757 list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
758 if (real_qp->qp_num == qp_open_attr->qp_num) {
759 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
760 qp_open_attr->qp_context);
761 break;
762 }
763 }
d3d72d90 764 mutex_unlock(&xrcd->tgt_qp_mutex);
0e0ec7e0 765 return qp;
d3d72d90 766}
0e0ec7e0 767EXPORT_SYMBOL(ib_open_qp);
d3d72d90 768
04c41bf3
CH
769static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
770 struct ib_qp_init_attr *qp_init_attr)
771{
772 struct ib_qp *real_qp = qp;
773
774 qp->event_handler = __ib_shared_qp_event_handler;
775 qp->qp_context = qp;
776 qp->pd = NULL;
777 qp->send_cq = qp->recv_cq = NULL;
778 qp->srq = NULL;
779 qp->xrcd = qp_init_attr->xrcd;
780 atomic_inc(&qp_init_attr->xrcd->usecnt);
781 INIT_LIST_HEAD(&qp->open_list);
782
783 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
784 qp_init_attr->qp_context);
785 if (!IS_ERR(qp))
786 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
787 else
788 real_qp->device->destroy_qp(real_qp);
789 return qp;
790}
791
1da177e4
LT
792struct ib_qp *ib_create_qp(struct ib_pd *pd,
793 struct ib_qp_init_attr *qp_init_attr)
794{
04c41bf3
CH
795 struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
796 struct ib_qp *qp;
a060b562
CH
797 int ret;
798
a9017e23
YH
799 if (qp_init_attr->rwq_ind_tbl &&
800 (qp_init_attr->recv_cq ||
801 qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
802 qp_init_attr->cap.max_recv_sge))
803 return ERR_PTR(-EINVAL);
804
a060b562
CH
805 /*
806 * If the caller is using the RDMA API, calculate the resources
807 * needed for the RDMA READ/WRITE operations.
808 *
809 * Note that these callers need to pass in a port number.
810 */
811 if (qp_init_attr->cap.max_rdma_ctxs)
812 rdma_rw_init_qp(device, qp_init_attr);
1da177e4 813
b42b63cf 814 qp = device->create_qp(pd, qp_init_attr, NULL);
04c41bf3
CH
815 if (IS_ERR(qp))
816 return qp;
817
d291f1a6
DJ
818 ret = ib_create_qp_security(qp, device);
819 if (ret) {
820 ib_destroy_qp(qp);
821 return ERR_PTR(ret);
822 }
823
04c41bf3
CH
824 qp->device = device;
825 qp->real_qp = qp;
826 qp->uobject = NULL;
827 qp->qp_type = qp_init_attr->qp_type;
a9017e23 828 qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
04c41bf3
CH
829
830 atomic_set(&qp->usecnt, 0);
fffb0383
CH
831 qp->mrs_used = 0;
832 spin_lock_init(&qp->mr_lock);
a060b562 833 INIT_LIST_HEAD(&qp->rdma_mrs);
0e353e34 834 INIT_LIST_HEAD(&qp->sig_mrs);
498ca3c8 835 qp->port = 0;
fffb0383 836
04c41bf3
CH
837 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
838 return ib_create_xrc_qp(qp, qp_init_attr);
839
840 qp->event_handler = qp_init_attr->event_handler;
841 qp->qp_context = qp_init_attr->qp_context;
842 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
843 qp->recv_cq = NULL;
844 qp->srq = NULL;
845 } else {
846 qp->recv_cq = qp_init_attr->recv_cq;
a9017e23
YH
847 if (qp_init_attr->recv_cq)
848 atomic_inc(&qp_init_attr->recv_cq->usecnt);
04c41bf3
CH
849 qp->srq = qp_init_attr->srq;
850 if (qp->srq)
851 atomic_inc(&qp_init_attr->srq->usecnt);
1da177e4
LT
852 }
853
04c41bf3
CH
854 qp->pd = pd;
855 qp->send_cq = qp_init_attr->send_cq;
856 qp->xrcd = NULL;
857
858 atomic_inc(&pd->usecnt);
a9017e23
YH
859 if (qp_init_attr->send_cq)
860 atomic_inc(&qp_init_attr->send_cq->usecnt);
861 if (qp_init_attr->rwq_ind_tbl)
862 atomic_inc(&qp->rwq_ind_tbl->usecnt);
a060b562
CH
863
864 if (qp_init_attr->cap.max_rdma_ctxs) {
865 ret = rdma_rw_init_mrs(qp, qp_init_attr);
866 if (ret) {
867 pr_err("failed to init MR pool ret= %d\n", ret);
868 ib_destroy_qp(qp);
b6bc1c73 869 return ERR_PTR(ret);
a060b562
CH
870 }
871 }
872
632bc3f6
BVA
873 /*
874 * Note: all hw drivers guarantee that max_send_sge is lower than
875 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
876 * max_send_sge <= max_sge_rd.
877 */
878 qp->max_write_sge = qp_init_attr->cap.max_send_sge;
879 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
880 device->attrs.max_sge_rd);
881
1da177e4
LT
882 return qp;
883}
884EXPORT_SYMBOL(ib_create_qp);
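/*
 * Illustrative sketch (not part of the original file): a minimal RC QP
 * request as a kernel consumer might issue it. The shared CQ and the
 * queue sizes are assumptions; real consumers size them against
 * device->attrs.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init_attr);
}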
885
8a51866f
RD
886static const struct {
887 int valid;
b42b63cf
SH
888 enum ib_qp_attr_mask req_param[IB_QPT_MAX];
889 enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
8a51866f
RD
890} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
891 [IB_QPS_RESET] = {
892 [IB_QPS_RESET] = { .valid = 1 },
8a51866f
RD
893 [IB_QPS_INIT] = {
894 .valid = 1,
895 .req_param = {
896 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
897 IB_QP_PORT |
898 IB_QP_QKEY),
c938a616 899 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
8a51866f
RD
900 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
901 IB_QP_PORT |
902 IB_QP_ACCESS_FLAGS),
903 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
904 IB_QP_PORT |
905 IB_QP_ACCESS_FLAGS),
b42b63cf
SH
906 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
907 IB_QP_PORT |
908 IB_QP_ACCESS_FLAGS),
909 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
910 IB_QP_PORT |
911 IB_QP_ACCESS_FLAGS),
8a51866f
RD
912 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
913 IB_QP_QKEY),
914 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
915 IB_QP_QKEY),
916 }
917 },
918 },
919 [IB_QPS_INIT] = {
920 [IB_QPS_RESET] = { .valid = 1 },
921 [IB_QPS_ERR] = { .valid = 1 },
922 [IB_QPS_INIT] = {
923 .valid = 1,
924 .opt_param = {
925 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
926 IB_QP_PORT |
927 IB_QP_QKEY),
928 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
929 IB_QP_PORT |
930 IB_QP_ACCESS_FLAGS),
931 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
932 IB_QP_PORT |
933 IB_QP_ACCESS_FLAGS),
b42b63cf
SH
934 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
935 IB_QP_PORT |
936 IB_QP_ACCESS_FLAGS),
937 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
938 IB_QP_PORT |
939 IB_QP_ACCESS_FLAGS),
8a51866f
RD
940 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
941 IB_QP_QKEY),
942 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
943 IB_QP_QKEY),
944 }
945 },
946 [IB_QPS_RTR] = {
947 .valid = 1,
948 .req_param = {
949 [IB_QPT_UC] = (IB_QP_AV |
950 IB_QP_PATH_MTU |
951 IB_QP_DEST_QPN |
952 IB_QP_RQ_PSN),
953 [IB_QPT_RC] = (IB_QP_AV |
954 IB_QP_PATH_MTU |
955 IB_QP_DEST_QPN |
956 IB_QP_RQ_PSN |
957 IB_QP_MAX_DEST_RD_ATOMIC |
958 IB_QP_MIN_RNR_TIMER),
b42b63cf
SH
959 [IB_QPT_XRC_INI] = (IB_QP_AV |
960 IB_QP_PATH_MTU |
961 IB_QP_DEST_QPN |
962 IB_QP_RQ_PSN),
963 [IB_QPT_XRC_TGT] = (IB_QP_AV |
964 IB_QP_PATH_MTU |
965 IB_QP_DEST_QPN |
966 IB_QP_RQ_PSN |
967 IB_QP_MAX_DEST_RD_ATOMIC |
968 IB_QP_MIN_RNR_TIMER),
8a51866f
RD
969 },
970 .opt_param = {
971 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
972 IB_QP_QKEY),
973 [IB_QPT_UC] = (IB_QP_ALT_PATH |
974 IB_QP_ACCESS_FLAGS |
975 IB_QP_PKEY_INDEX),
976 [IB_QPT_RC] = (IB_QP_ALT_PATH |
977 IB_QP_ACCESS_FLAGS |
978 IB_QP_PKEY_INDEX),
b42b63cf
SH
979 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
980 IB_QP_ACCESS_FLAGS |
981 IB_QP_PKEY_INDEX),
982 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
983 IB_QP_ACCESS_FLAGS |
984 IB_QP_PKEY_INDEX),
8a51866f
RD
985 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
986 IB_QP_QKEY),
987 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
988 IB_QP_QKEY),
dd5f03be 989 },
dbf727de 990 },
8a51866f
RD
991 },
992 [IB_QPS_RTR] = {
993 [IB_QPS_RESET] = { .valid = 1 },
994 [IB_QPS_ERR] = { .valid = 1 },
995 [IB_QPS_RTS] = {
996 .valid = 1,
997 .req_param = {
998 [IB_QPT_UD] = IB_QP_SQ_PSN,
999 [IB_QPT_UC] = IB_QP_SQ_PSN,
1000 [IB_QPT_RC] = (IB_QP_TIMEOUT |
1001 IB_QP_RETRY_CNT |
1002 IB_QP_RNR_RETRY |
1003 IB_QP_SQ_PSN |
1004 IB_QP_MAX_QP_RD_ATOMIC),
b42b63cf
SH
1005 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
1006 IB_QP_RETRY_CNT |
1007 IB_QP_RNR_RETRY |
1008 IB_QP_SQ_PSN |
1009 IB_QP_MAX_QP_RD_ATOMIC),
1010 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
1011 IB_QP_SQ_PSN),
8a51866f
RD
1012 [IB_QPT_SMI] = IB_QP_SQ_PSN,
1013 [IB_QPT_GSI] = IB_QP_SQ_PSN,
1014 },
1015 .opt_param = {
1016 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1017 IB_QP_QKEY),
1018 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1019 IB_QP_ALT_PATH |
1020 IB_QP_ACCESS_FLAGS |
1021 IB_QP_PATH_MIG_STATE),
1022 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1023 IB_QP_ALT_PATH |
1024 IB_QP_ACCESS_FLAGS |
1025 IB_QP_MIN_RNR_TIMER |
1026 IB_QP_PATH_MIG_STATE),
b42b63cf
SH
1027 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1028 IB_QP_ALT_PATH |
1029 IB_QP_ACCESS_FLAGS |
1030 IB_QP_PATH_MIG_STATE),
1031 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1032 IB_QP_ALT_PATH |
1033 IB_QP_ACCESS_FLAGS |
1034 IB_QP_MIN_RNR_TIMER |
1035 IB_QP_PATH_MIG_STATE),
8a51866f
RD
1036 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1037 IB_QP_QKEY),
1038 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1039 IB_QP_QKEY),
528e5a1b 1040 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
8a51866f
RD
1041 }
1042 }
1043 },
1044 [IB_QPS_RTS] = {
1045 [IB_QPS_RESET] = { .valid = 1 },
1046 [IB_QPS_ERR] = { .valid = 1 },
1047 [IB_QPS_RTS] = {
1048 .valid = 1,
1049 .opt_param = {
1050 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1051 IB_QP_QKEY),
4546d31d
DB
1052 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1053 IB_QP_ACCESS_FLAGS |
8a51866f
RD
1054 IB_QP_ALT_PATH |
1055 IB_QP_PATH_MIG_STATE),
4546d31d
DB
1056 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1057 IB_QP_ACCESS_FLAGS |
8a51866f
RD
1058 IB_QP_ALT_PATH |
1059 IB_QP_PATH_MIG_STATE |
1060 IB_QP_MIN_RNR_TIMER),
b42b63cf
SH
1061 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1062 IB_QP_ACCESS_FLAGS |
1063 IB_QP_ALT_PATH |
1064 IB_QP_PATH_MIG_STATE),
1065 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1066 IB_QP_ACCESS_FLAGS |
1067 IB_QP_ALT_PATH |
1068 IB_QP_PATH_MIG_STATE |
1069 IB_QP_MIN_RNR_TIMER),
8a51866f
RD
1070 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1071 IB_QP_QKEY),
1072 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1073 IB_QP_QKEY),
528e5a1b 1074 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
8a51866f
RD
1075 }
1076 },
1077 [IB_QPS_SQD] = {
1078 .valid = 1,
1079 .opt_param = {
1080 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1081 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1082 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
b42b63cf
SH
1083 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1084 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
8a51866f
RD
1085 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1086 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1087 }
1088 },
1089 },
1090 [IB_QPS_SQD] = {
1091 [IB_QPS_RESET] = { .valid = 1 },
1092 [IB_QPS_ERR] = { .valid = 1 },
1093 [IB_QPS_RTS] = {
1094 .valid = 1,
1095 .opt_param = {
1096 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1097 IB_QP_QKEY),
1098 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1099 IB_QP_ALT_PATH |
1100 IB_QP_ACCESS_FLAGS |
1101 IB_QP_PATH_MIG_STATE),
1102 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1103 IB_QP_ALT_PATH |
1104 IB_QP_ACCESS_FLAGS |
1105 IB_QP_MIN_RNR_TIMER |
1106 IB_QP_PATH_MIG_STATE),
b42b63cf
SH
1107 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1108 IB_QP_ALT_PATH |
1109 IB_QP_ACCESS_FLAGS |
1110 IB_QP_PATH_MIG_STATE),
1111 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1112 IB_QP_ALT_PATH |
1113 IB_QP_ACCESS_FLAGS |
1114 IB_QP_MIN_RNR_TIMER |
1115 IB_QP_PATH_MIG_STATE),
8a51866f
RD
1116 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1117 IB_QP_QKEY),
1118 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1119 IB_QP_QKEY),
1120 }
1121 },
1122 [IB_QPS_SQD] = {
1123 .valid = 1,
1124 .opt_param = {
1125 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1126 IB_QP_QKEY),
1127 [IB_QPT_UC] = (IB_QP_AV |
8a51866f
RD
1128 IB_QP_ALT_PATH |
1129 IB_QP_ACCESS_FLAGS |
1130 IB_QP_PKEY_INDEX |
1131 IB_QP_PATH_MIG_STATE),
1132 [IB_QPT_RC] = (IB_QP_PORT |
1133 IB_QP_AV |
1134 IB_QP_TIMEOUT |
1135 IB_QP_RETRY_CNT |
1136 IB_QP_RNR_RETRY |
1137 IB_QP_MAX_QP_RD_ATOMIC |
1138 IB_QP_MAX_DEST_RD_ATOMIC |
8a51866f
RD
1139 IB_QP_ALT_PATH |
1140 IB_QP_ACCESS_FLAGS |
1141 IB_QP_PKEY_INDEX |
1142 IB_QP_MIN_RNR_TIMER |
1143 IB_QP_PATH_MIG_STATE),
b42b63cf
SH
1144 [IB_QPT_XRC_INI] = (IB_QP_PORT |
1145 IB_QP_AV |
1146 IB_QP_TIMEOUT |
1147 IB_QP_RETRY_CNT |
1148 IB_QP_RNR_RETRY |
1149 IB_QP_MAX_QP_RD_ATOMIC |
1150 IB_QP_ALT_PATH |
1151 IB_QP_ACCESS_FLAGS |
1152 IB_QP_PKEY_INDEX |
1153 IB_QP_PATH_MIG_STATE),
1154 [IB_QPT_XRC_TGT] = (IB_QP_PORT |
1155 IB_QP_AV |
1156 IB_QP_TIMEOUT |
1157 IB_QP_MAX_DEST_RD_ATOMIC |
1158 IB_QP_ALT_PATH |
1159 IB_QP_ACCESS_FLAGS |
1160 IB_QP_PKEY_INDEX |
1161 IB_QP_MIN_RNR_TIMER |
1162 IB_QP_PATH_MIG_STATE),
8a51866f
RD
1163 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1164 IB_QP_QKEY),
1165 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1166 IB_QP_QKEY),
1167 }
1168 }
1169 },
1170 [IB_QPS_SQE] = {
1171 [IB_QPS_RESET] = { .valid = 1 },
1172 [IB_QPS_ERR] = { .valid = 1 },
1173 [IB_QPS_RTS] = {
1174 .valid = 1,
1175 .opt_param = {
1176 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1177 IB_QP_QKEY),
1178 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1179 IB_QP_ACCESS_FLAGS),
1180 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1181 IB_QP_QKEY),
1182 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1183 IB_QP_QKEY),
1184 }
1185 }
1186 },
1187 [IB_QPS_ERR] = {
1188 [IB_QPS_RESET] = { .valid = 1 },
1189 [IB_QPS_ERR] = { .valid = 1 }
1190 }
1191};
1192
1193int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
dd5f03be
MB
1194 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1195 enum rdma_link_layer ll)
8a51866f
RD
1196{
1197 enum ib_qp_attr_mask req_param, opt_param;
1198
1199 if (cur_state < 0 || cur_state > IB_QPS_ERR ||
1200 next_state < 0 || next_state > IB_QPS_ERR)
1201 return 0;
1202
1203 if (mask & IB_QP_CUR_STATE &&
1204 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1205 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1206 return 0;
1207
1208 if (!qp_state_table[cur_state][next_state].valid)
1209 return 0;
1210
1211 req_param = qp_state_table[cur_state][next_state].req_param[type];
1212 opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1213
1214 if ((mask & req_param) != req_param)
1215 return 0;
1216
1217 if (mask & ~(req_param | opt_param | IB_QP_STATE))
1218 return 0;
1219
1220 return 1;
1221}
1222EXPORT_SYMBOL(ib_modify_qp_is_ok);
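/*
 * Illustrative sketch (not part of the original file): how a caller might
 * validate a RESET -> INIT transition mask for an RC QP against the state
 * table above before applying it. The mask shown is the required set for
 * that transition plus IB_QP_STATE.
 */
static bool example_reset_to_init_ok(void)
{
	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_PKEY_INDEX |
				    IB_QP_PORT | IB_QP_ACCESS_FLAGS;

	return ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
				  mask, IB_LINK_LAYER_INFINIBAND);
}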
1223
c90ea9d8 1224int ib_resolve_eth_dmac(struct ib_device *device,
90898850 1225 struct rdma_ah_attr *ah_attr)
ed4c54e5
OG
1226{
1227 int ret = 0;
d8966fcd 1228 struct ib_global_route *grh;
ed4c54e5 1229
d8966fcd 1230 if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
c90ea9d8 1231 return -EINVAL;
dbf727de 1232
44c58487 1233 if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
c90ea9d8 1234 return 0;
dbf727de 1235
d8966fcd
DC
1236 grh = rdma_ah_retrieve_grh(ah_attr);
1237
1238 if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
1239 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
44c58487 1240 ah_attr->roce.dmac);
9636a56f
NO
1241 return 0;
1242 }
1243 if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1244 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1245 __be32 addr = 0;
1246
1247 memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1248 ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1249 } else {
1250 ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1251 (char *)ah_attr->roce.dmac);
1252 }
c90ea9d8
MS
1253 } else {
1254 union ib_gid sgid;
1255 struct ib_gid_attr sgid_attr;
1256 int ifindex;
1257 int hop_limit;
1258
1259 ret = ib_query_gid(device,
d8966fcd
DC
1260 rdma_ah_get_port_num(ah_attr),
1261 grh->sgid_index,
c90ea9d8
MS
1262 &sgid, &sgid_attr);
1263
1264 if (ret || !sgid_attr.ndev) {
1265 if (!ret)
1266 ret = -ENXIO;
1267 goto out;
1268 }
dbf727de 1269
c90ea9d8 1270 ifindex = sgid_attr.ndev->ifindex;
c3efe750 1271
d8966fcd
DC
1272 ret =
1273 rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
44c58487 1274 ah_attr->roce.dmac,
d8966fcd 1275 NULL, &ifindex, &hop_limit);
c90ea9d8
MS
1276
1277 dev_put(sgid_attr.ndev);
1278
d8966fcd 1279 grh->hop_limit = hop_limit;
ed4c54e5
OG
1280 }
1281out:
1282 return ret;
1283}
dbf727de 1284EXPORT_SYMBOL(ib_resolve_eth_dmac);
ed4c54e5 1285
a512c2fb
PP
1286/**
1287 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1288 * @qp: The QP to modify.
1289 * @attr: On input, specifies the QP attributes to modify. On output,
1290 * the current values of selected QP attributes are returned.
1291 * @attr_mask: A bit-mask used to specify which attributes of the QP
1292 * are being modified.
1293 * @udata: pointer to the user's input/output buffer information.
1294 *
1295 * It returns 0 on success and an appropriate error code on error.
1296 */
1297int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
1298 int attr_mask, struct ib_udata *udata)
1da177e4 1299{
a512c2fb 1300 int ret;
ed4c54e5 1301
a512c2fb
PP
1302 if (attr_mask & IB_QP_AV) {
1303 ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
c90ea9d8
MS
1304 if (ret)
1305 return ret;
1306 }
498ca3c8
NO
1307 ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1308 if (!ret && (attr_mask & IB_QP_PORT))
1309 qp->port = attr->port_num;
1310
1311 return ret;
a512c2fb
PP
1312}
1313EXPORT_SYMBOL(ib_modify_qp_with_udata);
ed4c54e5 1314
d4186194
YS
1315int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1316{
1317 int rc;
1318 u32 netdev_speed;
1319 struct net_device *netdev;
1320 struct ethtool_link_ksettings lksettings;
1321
1322 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1323 return -EINVAL;
1324
1325 if (!dev->get_netdev)
1326 return -EOPNOTSUPP;
1327
1328 netdev = dev->get_netdev(dev, port_num);
1329 if (!netdev)
1330 return -ENODEV;
1331
1332 rtnl_lock();
1333 rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1334 rtnl_unlock();
1335
1336 dev_put(netdev);
1337
1338 if (!rc) {
1339 netdev_speed = lksettings.base.speed;
1340 } else {
1341 netdev_speed = SPEED_1000;
1342 pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1343 netdev_speed);
1344 }
1345
1346 if (netdev_speed <= SPEED_1000) {
1347 *width = IB_WIDTH_1X;
1348 *speed = IB_SPEED_SDR;
1349 } else if (netdev_speed <= SPEED_10000) {
1350 *width = IB_WIDTH_1X;
1351 *speed = IB_SPEED_FDR10;
1352 } else if (netdev_speed <= SPEED_20000) {
1353 *width = IB_WIDTH_4X;
1354 *speed = IB_SPEED_DDR;
1355 } else if (netdev_speed <= SPEED_25000) {
1356 *width = IB_WIDTH_1X;
1357 *speed = IB_SPEED_EDR;
1358 } else if (netdev_speed <= SPEED_40000) {
1359 *width = IB_WIDTH_4X;
1360 *speed = IB_SPEED_FDR10;
1361 } else {
1362 *width = IB_WIDTH_4X;
1363 *speed = IB_SPEED_EDR;
1364 }
1365
1366 return 0;
1367}
1368EXPORT_SYMBOL(ib_get_eth_speed);
1369
a512c2fb
PP
1370int ib_modify_qp(struct ib_qp *qp,
1371 struct ib_qp_attr *qp_attr,
1372 int qp_attr_mask)
1373{
1374 return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
1da177e4
LT
1375}
1376EXPORT_SYMBOL(ib_modify_qp);
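/*
 * Illustrative sketch (not part of the original file): moving a freshly
 * created RC QP to INIT. The pkey index, port and access flags are
 * example values a real consumer would derive from its connection setup.
 */
static int example_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}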
1377
1378int ib_query_qp(struct ib_qp *qp,
1379 struct ib_qp_attr *qp_attr,
1380 int qp_attr_mask,
1381 struct ib_qp_init_attr *qp_init_attr)
1382{
1383 return qp->device->query_qp ?
0e0ec7e0 1384 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
1da177e4
LT
1385 -ENOSYS;
1386}
1387EXPORT_SYMBOL(ib_query_qp);
1388
0e0ec7e0
SH
1389int ib_close_qp(struct ib_qp *qp)
1390{
1391 struct ib_qp *real_qp;
1392 unsigned long flags;
1393
1394 real_qp = qp->real_qp;
1395 if (real_qp == qp)
1396 return -EINVAL;
1397
1398 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1399 list_del(&qp->open_list);
1400 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1401
1402 atomic_dec(&real_qp->usecnt);
d291f1a6 1403 ib_close_shared_qp_security(qp->qp_sec);
0e0ec7e0
SH
1404 kfree(qp);
1405
1406 return 0;
1407}
1408EXPORT_SYMBOL(ib_close_qp);
1409
1410static int __ib_destroy_shared_qp(struct ib_qp *qp)
1411{
1412 struct ib_xrcd *xrcd;
1413 struct ib_qp *real_qp;
1414 int ret;
1415
1416 real_qp = qp->real_qp;
1417 xrcd = real_qp->xrcd;
1418
1419 mutex_lock(&xrcd->tgt_qp_mutex);
1420 ib_close_qp(qp);
1421 if (atomic_read(&real_qp->usecnt) == 0)
1422 list_del(&real_qp->xrcd_list);
1423 else
1424 real_qp = NULL;
1425 mutex_unlock(&xrcd->tgt_qp_mutex);
1426
1427 if (real_qp) {
1428 ret = ib_destroy_qp(real_qp);
1429 if (!ret)
1430 atomic_dec(&xrcd->usecnt);
1431 else
1432 __ib_insert_xrcd_qp(xrcd, real_qp);
1433 }
1434
1435 return 0;
1436}
1437
1da177e4
LT
1438int ib_destroy_qp(struct ib_qp *qp)
1439{
1440 struct ib_pd *pd;
1441 struct ib_cq *scq, *rcq;
1442 struct ib_srq *srq;
a9017e23 1443 struct ib_rwq_ind_table *ind_tbl;
d291f1a6 1444 struct ib_qp_security *sec;
1da177e4
LT
1445 int ret;
1446
fffb0383
CH
1447 WARN_ON_ONCE(qp->mrs_used > 0);
1448
0e0ec7e0
SH
1449 if (atomic_read(&qp->usecnt))
1450 return -EBUSY;
1451
1452 if (qp->real_qp != qp)
1453 return __ib_destroy_shared_qp(qp);
1454
b42b63cf
SH
1455 pd = qp->pd;
1456 scq = qp->send_cq;
1457 rcq = qp->recv_cq;
1458 srq = qp->srq;
a9017e23 1459 ind_tbl = qp->rwq_ind_tbl;
d291f1a6
DJ
1460 sec = qp->qp_sec;
1461 if (sec)
1462 ib_destroy_qp_security_begin(sec);
1da177e4 1463
a060b562
CH
1464 if (!qp->uobject)
1465 rdma_rw_cleanup_mrs(qp);
1466
1da177e4
LT
1467 ret = qp->device->destroy_qp(qp);
1468 if (!ret) {
b42b63cf
SH
1469 if (pd)
1470 atomic_dec(&pd->usecnt);
1471 if (scq)
1472 atomic_dec(&scq->usecnt);
1473 if (rcq)
1474 atomic_dec(&rcq->usecnt);
1da177e4
LT
1475 if (srq)
1476 atomic_dec(&srq->usecnt);
a9017e23
YH
1477 if (ind_tbl)
1478 atomic_dec(&ind_tbl->usecnt);
d291f1a6
DJ
1479 if (sec)
1480 ib_destroy_qp_security_end(sec);
1481 } else {
1482 if (sec)
1483 ib_destroy_qp_security_abort(sec);
1da177e4
LT
1484 }
1485
1486 return ret;
1487}
1488EXPORT_SYMBOL(ib_destroy_qp);
1489
1490/* Completion queues */
1491
1492struct ib_cq *ib_create_cq(struct ib_device *device,
1493 ib_comp_handler comp_handler,
1494 void (*event_handler)(struct ib_event *, void *),
8e37210b
MB
1495 void *cq_context,
1496 const struct ib_cq_init_attr *cq_attr)
1da177e4
LT
1497{
1498 struct ib_cq *cq;
1499
8e37210b 1500 cq = device->create_cq(device, cq_attr, NULL, NULL);
1da177e4
LT
1501
1502 if (!IS_ERR(cq)) {
1503 cq->device = device;
b5e81bf5 1504 cq->uobject = NULL;
1da177e4
LT
1505 cq->comp_handler = comp_handler;
1506 cq->event_handler = event_handler;
1507 cq->cq_context = cq_context;
1508 atomic_set(&cq->usecnt, 0);
1509 }
1510
1511 return cq;
1512}
1513EXPORT_SYMBOL(ib_create_cq);
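/*
 * Illustrative sketch (not part of the original file): creating a CQ via
 * the attribute structure taken by ib_create_cq(). The depth and
 * completion vector are arbitrary example values; both handlers may be
 * NULL when the consumer only polls.
 */
static struct ib_cq *example_create_cq(struct ib_device *device, void *ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = 128,	/* minimum number of CQ entries */
		.comp_vector = 0,
	};

	return ib_create_cq(device, NULL, NULL, ctx, &cq_attr);
}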
1514
2dd57162
EC
1515int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1516{
1517 return cq->device->modify_cq ?
1518 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1519}
1520EXPORT_SYMBOL(ib_modify_cq);
1521
1da177e4
LT
1522int ib_destroy_cq(struct ib_cq *cq)
1523{
1524 if (atomic_read(&cq->usecnt))
1525 return -EBUSY;
1526
1527 return cq->device->destroy_cq(cq);
1528}
1529EXPORT_SYMBOL(ib_destroy_cq);
1530
a74cd4af 1531int ib_resize_cq(struct ib_cq *cq, int cqe)
1da177e4 1532{
40de2e54 1533 return cq->device->resize_cq ?
33b9b3ee 1534 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
1da177e4
LT
1535}
1536EXPORT_SYMBOL(ib_resize_cq);
1537
1538/* Memory regions */
1539
1da177e4
LT
1540int ib_dereg_mr(struct ib_mr *mr)
1541{
ab67ed8d 1542 struct ib_pd *pd = mr->pd;
1da177e4
LT
1543 int ret;
1544
1da177e4
LT
1545 ret = mr->device->dereg_mr(mr);
1546 if (!ret)
1547 atomic_dec(&pd->usecnt);
1548
1549 return ret;
1550}
1551EXPORT_SYMBOL(ib_dereg_mr);
1552
9bee178b
SG
1553/**
1554 * ib_alloc_mr() - Allocates a memory region
1555 * @pd: protection domain associated with the region
1556 * @mr_type: memory region type
1557 * @max_num_sg: maximum sg entries available for registration.
1558 *
1559 * Notes:
1560 * Memory registration page/sg lists must not exceed max_num_sg.
1561 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
1562 * max_num_sg * used_page_size.
1563 *
1564 */
1565struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1566 enum ib_mr_type mr_type,
1567 u32 max_num_sg)
00f7ec36
SW
1568{
1569 struct ib_mr *mr;
1570
d9f272c5 1571 if (!pd->device->alloc_mr)
00f7ec36
SW
1572 return ERR_PTR(-ENOSYS);
1573
d9f272c5 1574 mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
00f7ec36
SW
1575 if (!IS_ERR(mr)) {
1576 mr->device = pd->device;
1577 mr->pd = pd;
1578 mr->uobject = NULL;
1579 atomic_inc(&pd->usecnt);
d4a85c30 1580 mr->need_inval = false;
00f7ec36
SW
1581 }
1582
1583 return mr;
1584}
d9f272c5 1585EXPORT_SYMBOL(ib_alloc_mr);
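/*
 * Illustrative sketch (not part of the original file): allocating a
 * registration MR sized for a small page list and releasing it with
 * ib_dereg_mr(). The 16-entry limit is an arbitrary example value.
 */
static int example_alloc_reg_mr(struct ib_pd *pd)
{
	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);

	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... ib_map_mr_sg() and an IB_WR_REG_MR work request go here ... */
	return ib_dereg_mr(mr);
}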
00f7ec36 1586
1da177e4
LT
1587/* "Fast" memory regions */
1588
1589struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1590 int mr_access_flags,
1591 struct ib_fmr_attr *fmr_attr)
1592{
1593 struct ib_fmr *fmr;
1594
1595 if (!pd->device->alloc_fmr)
1596 return ERR_PTR(-ENOSYS);
1597
1598 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1599 if (!IS_ERR(fmr)) {
1600 fmr->device = pd->device;
1601 fmr->pd = pd;
1602 atomic_inc(&pd->usecnt);
1603 }
1604
1605 return fmr;
1606}
1607EXPORT_SYMBOL(ib_alloc_fmr);
1608
1609int ib_unmap_fmr(struct list_head *fmr_list)
1610{
1611 struct ib_fmr *fmr;
1612
1613 if (list_empty(fmr_list))
1614 return 0;
1615
1616 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1617 return fmr->device->unmap_fmr(fmr_list);
1618}
1619EXPORT_SYMBOL(ib_unmap_fmr);
1620
1621int ib_dealloc_fmr(struct ib_fmr *fmr)
1622{
1623 struct ib_pd *pd;
1624 int ret;
1625
1626 pd = fmr->pd;
1627 ret = fmr->device->dealloc_fmr(fmr);
1628 if (!ret)
1629 atomic_dec(&pd->usecnt);
1630
1631 return ret;
1632}
1633EXPORT_SYMBOL(ib_dealloc_fmr);
1634
1635/* Multicast groups */
1636
52363335
NO
1637static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
1638{
1639 struct ib_qp_init_attr init_attr = {};
1640 struct ib_qp_attr attr = {};
1641 int num_eth_ports = 0;
1642 int port;
1643
1644 /* If QP state >= init, it is assigned to a port and we can check this
1645 * port only.
1646 */
1647 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
1648 if (attr.qp_state >= IB_QPS_INIT) {
1649 if (qp->device->get_link_layer(qp->device, attr.port_num) !=
1650 IB_LINK_LAYER_INFINIBAND)
1651 return true;
1652 goto lid_check;
1653 }
1654 }
1655
1656 /* Can't get a quick answer, iterate over all ports */
1657 for (port = 0; port < qp->device->phys_port_cnt; port++)
1658 if (qp->device->get_link_layer(qp->device, port) !=
1659 IB_LINK_LAYER_INFINIBAND)
1660 num_eth_ports++;
1661
1662 /* If we have at least one Ethernet port, the RoCE annex declares that
1663 * multicast LID should be ignored. We can't tell at this step if the
1664 * QP belongs to an IB or Ethernet port.
1665 */
1666 if (num_eth_ports)
1667 return true;
1668
1669 /* If all the ports are IB, we can check according to IB spec. */
1670lid_check:
1671 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1672 lid == be16_to_cpu(IB_LID_PERMISSIVE));
1673}
1674
1da177e4
LT
1675int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1676{
c3bccbfb
OG
1677 int ret;
1678
0c33aeed
JM
1679 if (!qp->device->attach_mcast)
1680 return -ENOSYS;
be1d325a
NO
1681
1682 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1683 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
0c33aeed
JM
1684 return -EINVAL;
1685
c3bccbfb
OG
1686 ret = qp->device->attach_mcast(qp, gid, lid);
1687 if (!ret)
1688 atomic_inc(&qp->usecnt);
1689 return ret;
1da177e4
LT
1690}
1691EXPORT_SYMBOL(ib_attach_mcast);
1692
1693int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1694{
c3bccbfb
OG
1695 int ret;
1696
0c33aeed
JM
1697 if (!qp->device->detach_mcast)
1698 return -ENOSYS;
be1d325a
NO
1699
1700 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1701 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
0c33aeed
JM
1702 return -EINVAL;
1703
c3bccbfb
OG
1704 ret = qp->device->detach_mcast(qp, gid, lid);
1705 if (!ret)
1706 atomic_dec(&qp->usecnt);
1707 return ret;
1da177e4
LT
1708}
1709EXPORT_SYMBOL(ib_detach_mcast);
59991f94
SH
1710
1711struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1712{
1713 struct ib_xrcd *xrcd;
1714
1715 if (!device->alloc_xrcd)
1716 return ERR_PTR(-ENOSYS);
1717
1718 xrcd = device->alloc_xrcd(device, NULL, NULL);
1719 if (!IS_ERR(xrcd)) {
1720 xrcd->device = device;
53d0bd1e 1721 xrcd->inode = NULL;
59991f94 1722 atomic_set(&xrcd->usecnt, 0);
d3d72d90
SH
1723 mutex_init(&xrcd->tgt_qp_mutex);
1724 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
59991f94
SH
1725 }
1726
1727 return xrcd;
1728}
1729EXPORT_SYMBOL(ib_alloc_xrcd);
1730
1731int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1732{
d3d72d90
SH
1733 struct ib_qp *qp;
1734 int ret;
1735
59991f94
SH
1736 if (atomic_read(&xrcd->usecnt))
1737 return -EBUSY;
1738
d3d72d90
SH
1739 while (!list_empty(&xrcd->tgt_qp_list)) {
1740 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1741 ret = ib_destroy_qp(qp);
1742 if (ret)
1743 return ret;
1744 }
1745
59991f94
SH
1746 return xrcd->device->dealloc_xrcd(xrcd);
1747}
1748EXPORT_SYMBOL(ib_dealloc_xrcd);
319a441d 1749
5fd251c8
YH
1750/**
1751 * ib_create_wq - Creates a WQ associated with the specified protection
1752 * domain.
1753 * @pd: The protection domain associated with the WQ.
1754 * @wq_init_attr: A list of initial attributes required to create the
1755 * WQ. If WQ creation succeeds, then the attributes are updated to
1756 * the actual capabilities of the created WQ.
1757 *
1758 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
1759 * the requested size of the WQ and are set to the actual values allocated
1760 * on return.
1761 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
1762 * at least as large as the requested values.
1763 */
1764struct ib_wq *ib_create_wq(struct ib_pd *pd,
1765 struct ib_wq_init_attr *wq_attr)
1766{
1767 struct ib_wq *wq;
1768
1769 if (!pd->device->create_wq)
1770 return ERR_PTR(-ENOSYS);
1771
1772 wq = pd->device->create_wq(pd, wq_attr, NULL);
1773 if (!IS_ERR(wq)) {
1774 wq->event_handler = wq_attr->event_handler;
1775 wq->wq_context = wq_attr->wq_context;
1776 wq->wq_type = wq_attr->wq_type;
1777 wq->cq = wq_attr->cq;
1778 wq->device = pd->device;
1779 wq->pd = pd;
1780 wq->uobject = NULL;
1781 atomic_inc(&pd->usecnt);
1782 atomic_inc(&wq_attr->cq->usecnt);
1783 atomic_set(&wq->usecnt, 0);
1784 }
1785 return wq;
1786}
1787EXPORT_SYMBOL(ib_create_wq);
1788
1789/**
1790 * ib_destroy_wq - Destroys the specified WQ.
1791 * @wq: The WQ to destroy.
1792 */
1793int ib_destroy_wq(struct ib_wq *wq)
1794{
1795 int err;
1796 struct ib_cq *cq = wq->cq;
1797 struct ib_pd *pd = wq->pd;
1798
1799 if (atomic_read(&wq->usecnt))
1800 return -EBUSY;
1801
1802 err = wq->device->destroy_wq(wq);
1803 if (!err) {
1804 atomic_dec(&pd->usecnt);
1805 atomic_dec(&cq->usecnt);
1806 }
1807 return err;
1808}
1809EXPORT_SYMBOL(ib_destroy_wq);
1810
1811/**
1812 * ib_modify_wq - Modifies the specified WQ.
1813 * @wq: The WQ to modify.
1814 * @wq_attr: On input, specifies the WQ attributes to modify.
1815 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
1816 * are being modified.
1817 * On output, the current values of selected WQ attributes are returned.
1818 */
1819int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
1820 u32 wq_attr_mask)
1821{
1822 int err;
1823
1824 if (!wq->device->modify_wq)
1825 return -ENOSYS;
1826
1827 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
1828 return err;
1829}
1830EXPORT_SYMBOL(ib_modify_wq);
1831
6d39786b
YH
1832/*
1833 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
1834 * @device: The device on which to create the rwq indirection table.
1835 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
1836 * create the Indirection Table.
1837 *
1838 * Note: the lifetime of ib_rwq_ind_table_init_attr->ind_tbl must be at least
1839 * as long as that of the created ib_rwq_ind_table object, and the caller is
1840 * responsible for its memory allocation/free.
1841 */
1842struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
1843 struct ib_rwq_ind_table_init_attr *init_attr)
1844{
1845 struct ib_rwq_ind_table *rwq_ind_table;
1846 int i;
1847 u32 table_size;
1848
1849 if (!device->create_rwq_ind_table)
1850 return ERR_PTR(-ENOSYS);
1851
1852 table_size = (1 << init_attr->log_ind_tbl_size);
1853 rwq_ind_table = device->create_rwq_ind_table(device,
1854 init_attr, NULL);
1855 if (IS_ERR(rwq_ind_table))
1856 return rwq_ind_table;
1857
1858 rwq_ind_table->ind_tbl = init_attr->ind_tbl;
1859 rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
1860 rwq_ind_table->device = device;
1861 rwq_ind_table->uobject = NULL;
1862 atomic_set(&rwq_ind_table->usecnt, 0);
1863
1864 for (i = 0; i < table_size; i++)
1865 atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
1866
1867 return rwq_ind_table;
1868}
1869EXPORT_SYMBOL(ib_create_rwq_ind_table);
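/*
 * Usage sketch (illustrative, not from this file): building a four-entry
 * RSS indirection table from WQs created earlier. The caller-owned wqs[]
 * array must outlive the table, as noted above; names are assumptions.
 */
static struct ib_rwq_ind_table *
example_create_ind_table(struct ib_device *device, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = 2,		/* 1 << 2 == 4 entries */
		.ind_tbl	  = wqs,	/* caller-owned array of 4 WQs */
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}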
1870
1871/**
1872 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
1873 * @rwq_ind_table: The Indirection Table to destroy.
1874 */
1875int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
1876{
1877 int err, i;
1878 u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
1879 struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
1880
1881 if (atomic_read(&rwq_ind_table->usecnt))
1882 return -EBUSY;
1883
1884 err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
1885 if (!err) {
1886 for (i = 0; i < table_size; i++)
1887 atomic_dec(&ind_tbl[i]->usecnt);
1888 }
1889
1890 return err;
1891}
1892EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
1893
1894struct ib_flow *ib_create_flow(struct ib_qp *qp,
1895 struct ib_flow_attr *flow_attr,
1896 int domain)
1897{
1898 struct ib_flow *flow_id;
1899 if (!qp->device->create_flow)
1900 return ERR_PTR(-ENOSYS);
1901
1902 flow_id = qp->device->create_flow(qp, flow_attr, domain);
8ecc7985 1903 if (!IS_ERR(flow_id)) {
319a441d 1904 atomic_inc(&qp->usecnt);
1905 flow_id->qp = qp;
1906 }
1907 return flow_id;
1908}
1909EXPORT_SYMBOL(ib_create_flow);
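/*
 * Usage sketch (illustrative, not from this file): attaching a sniffer
 * steering rule so that all port traffic is delivered to the given QP.
 * The choice of IB_FLOW_DOMAIN_USER and the zero-spec attribute layout
 * are assumptions; the returned handle is released with ib_destroy_flow().
 */
static struct ib_flow *example_sniff_port(struct ib_qp *qp, u8 port)
{
	struct ib_flow_attr attr = {
		.type		= IB_FLOW_ATTR_SNIFFER,
		.size		= sizeof(attr),
		.num_of_specs	= 0,
		.port		= port,
	};

	return ib_create_flow(qp, &attr, IB_FLOW_DOMAIN_USER);
}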
1910
1911int ib_destroy_flow(struct ib_flow *flow_id)
1912{
1913 int err;
1914 struct ib_qp *qp = flow_id->qp;
1915
1916 err = qp->device->destroy_flow(flow_id);
1917 if (!err)
1918 atomic_dec(&qp->usecnt);
1919 return err;
1920}
1921EXPORT_SYMBOL(ib_destroy_flow);
1922
1923int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1924 struct ib_mr_status *mr_status)
1925{
1926 return mr->device->check_mr_status ?
1927 mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1928}
1929EXPORT_SYMBOL(ib_check_mr_status);
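/*
 * Usage sketch (illustrative, not from this file): querying a signature
 * enabled MR after I/O completion, as a protected transport such as iSER
 * does. Only the status query is shown; reacting to the error is left out.
 */
static int example_check_sig(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error at offset %llu\n",
		       (unsigned long long)mr_status.sig_err.sig_err_offset);

	return 0;
}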
1930
1931int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
1932 int state)
1933{
1934 if (!device->set_vf_link_state)
1935 return -ENOSYS;
1936
1937 return device->set_vf_link_state(device, vf, port, state);
1938}
1939EXPORT_SYMBOL(ib_set_vf_link_state);
1940
1941int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
1942 struct ifla_vf_info *info)
1943{
1944 if (!device->get_vf_config)
1945 return -ENOSYS;
1946
1947 return device->get_vf_config(device, vf, port, info);
1948}
1949EXPORT_SYMBOL(ib_get_vf_config);
1950
1951int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
1952 struct ifla_vf_stats *stats)
1953{
1954 if (!device->get_vf_stats)
1955 return -ENOSYS;
1956
1957 return device->get_vf_stats(device, vf, port, stats);
1958}
1959EXPORT_SYMBOL(ib_get_vf_stats);
1960
1961int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
1962 int type)
1963{
1964 if (!device->set_vf_guid)
1965 return -ENOSYS;
1966
1967 return device->set_vf_guid(device, vf, port, guid, type);
1968}
1969EXPORT_SYMBOL(ib_set_vf_guid);
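/*
 * Usage sketch (illustrative, not from this file): basic SR-IOV VF
 * management from the PF side. The vf/port values are assumptions; each
 * helper returns -ENOSYS when the driver lacks the callback.
 */
static int example_vf_admin(struct ib_device *device, int vf, u8 port)
{
	struct ifla_vf_info info;
	int ret;

	ret = ib_set_vf_link_state(device, vf, port,
				   IFLA_VF_LINK_STATE_ENABLE);
	if (ret)
		return ret;

	ret = ib_get_vf_config(device, vf, port, &info);
	if (ret)
		return ret;

	pr_info("vf %u link state %u\n", info.vf, info.linkstate);
	return 0;
}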
1970
1971/**
1972 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
1973 * and set it to the memory region.
1974 * @mr: memory region
1975 * @sg: dma mapped scatterlist
1976 * @sg_nents: number of entries in sg
ff2ba993 1977 * @sg_offset: offset in bytes into sg
1978 * @page_size: page vector desired page size
1979 *
1980 * Constraints:
1981 * - The first sg element is allowed to have an offset.
1982 * - Each sg element must either be aligned to page_size or virtually
1983 * contiguous to the previous element. In case an sg element has a
1984 * non-contiguous offset, the mapping prefix will not include it.
1985 * - The last sg element is allowed to have length less than page_size.
1986 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
1987 * then only max_num_sg entries will be mapped.
52746129 1988 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
f5aa9159 1989 * constraints holds and the page_size argument is ignored.
1990 *
1991 * Returns the number of sg elements that were mapped to the memory region.
1992 *
1993 * After this completes successfully, the memory region
1994 * is ready for registration.
1995 */
ff2ba993 1996int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
9aa8b321 1997 unsigned int *sg_offset, unsigned int page_size)
1998{
1999 if (unlikely(!mr->device->map_mr_sg))
2000 return -ENOSYS;
2001
2002 mr->page_size = page_size;
2003
ff2ba993 2004 return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
2005}
2006EXPORT_SYMBOL(ib_map_mr_sg);
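/*
 * Usage sketch (illustrative, not from this file): the typical fast
 * registration flow built on ib_map_mr_sg(). The scatterlist is assumed
 * to be DMA mapped already; granularity and error handling are simplified.
 */
static int example_fast_reg(struct ib_pd *pd, struct scatterlist *sg,
			    int sg_nents, struct ib_reg_wr *reg_wr)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Map as many SG entries as fit the MR, at PAGE_SIZE granularity. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* The MR is now ready to be registered with an IB_WR_REG_MR WR. */
	reg_wr->wr.opcode = IB_WR_REG_MR;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
	return 0;
}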
2007
2008/**
2009 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2010 * to a page vector
2011 * @mr: memory region
2012 * @sgl: dma mapped scatterlist
2013 * @sg_nents: number of entries in sgl
2014 * @sg_offset_p: IN: start offset in bytes into sgl
2015 * OUT: offset in bytes for element n of the sgl of the first
2016 * byte that has not been processed, where n is the return
2017 * value of this function.
2018 * @set_page: driver page assignment function pointer
2019 *
8f5ba10e 2020 * Core service helper for drivers to convert the largest
2021 * prefix of the given sg list to a page vector. The sg list
2022 * prefix converted is the prefix that meets the requirements
2023 * of ib_map_mr_sg.
2024 *
2025 * Returns the number of sg elements that were assigned to
2026 * a page vector.
2027 */
ff2ba993 2028int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
9aa8b321 2029 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2030{
2031 struct scatterlist *sg;
b6aeb980 2032 u64 last_end_dma_addr = 0;
9aa8b321 2033 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2034 unsigned int last_page_off = 0;
2035 u64 page_mask = ~((u64)mr->page_size - 1);
8f5ba10e 2036 int i, ret;
2037
2038 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2039 return -EINVAL;
2040
ff2ba993 2041 mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2042 mr->length = 0;
2043
2044 for_each_sg(sgl, sg, sg_nents, i) {
ff2ba993 2045 u64 dma_addr = sg_dma_address(sg) + sg_offset;
9aa8b321 2046 u64 prev_addr = dma_addr;
ff2ba993 2047 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2048 u64 end_dma_addr = dma_addr + dma_len;
2049 u64 page_addr = dma_addr & page_mask;
2050
2051 /*
2052 * For the second and later elements, check whether either the
2053 * end of element i-1 or the start of element i is not aligned
2054 * on a page boundary.
2055 */
2056 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2057 /* Stop mapping if there is a gap. */
2058 if (last_end_dma_addr != dma_addr)
2059 break;
2060
2061 /*
2062 * Coalesce this element with the last. If it is small
2063 * enough just update mr->length. Otherwise start
2064 * mapping from the next page.
2065 */
2066 goto next_page;
2067 }
2068
2069 do {
8f5ba10e 2070 ret = set_page(mr, page_addr);
2071 if (unlikely(ret < 0)) {
2072 sg_offset = prev_addr - sg_dma_address(sg);
2073 mr->length += prev_addr - dma_addr;
2074 if (sg_offset_p)
2075 *sg_offset_p = sg_offset;
2076 return i || sg_offset ? i : ret;
2077 }
2078 prev_addr = page_addr;
8f5ba10e 2079next_page:
2080 page_addr += mr->page_size;
2081 } while (page_addr < end_dma_addr);
2082
2083 mr->length += dma_len;
2084 last_end_dma_addr = end_dma_addr;
4c67e2bf 2085 last_page_off = end_dma_addr & ~page_mask;
2086
2087 sg_offset = 0;
2088 }
2089
2090 if (sg_offset_p)
2091 *sg_offset_p = 0;
2092 return i;
2093}
2094EXPORT_SYMBOL(ib_sg_to_pages);
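/*
 * Usage sketch (illustrative, not from this file): how a provider's
 * ->map_mr_sg() callback typically uses ib_sg_to_pages(). The
 * "example_mr" structure and its fixed page array are assumptions; real
 * drivers store the pages in their hardware-specific layout.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		pages[32];
	int		npages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages >= ARRAY_SIZE(emr->pages))
		return -ENOMEM;

	emr->pages[emr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}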
2095
2096struct ib_drain_cqe {
2097 struct ib_cqe cqe;
2098 struct completion done;
2099};
2100
2101static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2102{
2103 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2104 cqe);
2105
2106 complete(&cqe->done);
2107}
2108
2109/*
2110 * Post a WR and block until its completion is reaped for the SQ.
2111 */
2112static void __ib_drain_sq(struct ib_qp *qp)
2113{
f039f44f 2114 struct ib_cq *cq = qp->send_cq;
2115 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2116 struct ib_drain_cqe sdrain;
2117 struct ib_send_wr swr = {}, *bad_swr;
2118 int ret;
2119
2120 swr.wr_cqe = &sdrain.cqe;
2121 sdrain.cqe.done = ib_drain_qp_done;
2122 init_completion(&sdrain.done);
2123
2124 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2125 if (ret) {
2126 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2127 return;
2128 }
2129
2130 ret = ib_post_send(qp, &swr, &bad_swr);
2131 if (ret) {
2132 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2133 return;
2134 }
2135
2136 if (cq->poll_ctx == IB_POLL_DIRECT)
2137 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2138 ib_process_cq_direct(cq, -1);
2139 else
2140 wait_for_completion(&sdrain.done);
2141}
2142
2143/*
2144 * Post a WR and block until its completion is reaped for the RQ.
2145 */
2146static void __ib_drain_rq(struct ib_qp *qp)
2147{
f039f44f 2148 struct ib_cq *cq = qp->recv_cq;
2149 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2150 struct ib_drain_cqe rdrain;
2151 struct ib_recv_wr rwr = {}, *bad_rwr;
2152 int ret;
2153
2154 rwr.wr_cqe = &rdrain.cqe;
2155 rdrain.cqe.done = ib_drain_qp_done;
2156 init_completion(&rdrain.done);
2157
2158 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2159 if (ret) {
2160 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2161 return;
2162 }
2163
2164 ret = ib_post_recv(qp, &rwr, &bad_rwr);
2165 if (ret) {
2166 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2167 return;
2168 }
2169
2170 if (cq->poll_ctx == IB_POLL_DIRECT)
2171 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2172 ib_process_cq_direct(cq, -1);
2173 else
2174 wait_for_completion(&rdrain.done);
2175}
2176
2177/**
2178 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2179 * application.
2180 * @qp: queue pair to drain
2181 *
2182 * If the device has a provider-specific drain function, then
2183 * call that. Otherwise call the generic drain function
2184 * __ib_drain_sq().
2185 *
2186 * The caller must:
2187 *
2188 * ensure there is room in the CQ and SQ for the drain work request and
2189 * completion.
2190 *
f039f44f 2191 * allocate the CQ using ib_alloc_cq().
2192 *
2193 * ensure that there are no other contexts that are posting WRs concurrently.
2194 * Otherwise the drain is not guaranteed.
2195 */
2196void ib_drain_sq(struct ib_qp *qp)
2197{
2198 if (qp->device->drain_sq)
2199 qp->device->drain_sq(qp);
2200 else
2201 __ib_drain_sq(qp);
2202}
2203EXPORT_SYMBOL(ib_drain_sq);
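/*
 * Usage sketch (illustrative, not from this file): allocating a CQ so the
 * generic drain helpers can be used. Any ib_alloc_cq() polling context
 * works; the extra CQE leaves room for the drain completion as required
 * above. The depth is an assumption.
 */
static struct ib_cq *example_alloc_drainable_cq(struct ib_device *device,
						int sq_depth)
{
	/* +1 CQE reserved for the drain WR's completion */
	return ib_alloc_cq(device, NULL, sq_depth + 1, 0, IB_POLL_SOFTIRQ);
}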
2204
2205/**
2206 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2207 * application.
2208 * @qp: queue pair to drain
2209 *
2210 * If the device has a provider-specific drain function, then
2211 * call that. Otherwise call the generic drain function
2212 * __ib_drain_rq().
2213 *
2214 * The caller must:
2215 *
2216 * ensure there is room in the CQ and RQ for the drain work request and
2217 * completion.
2218 *
f039f44f 2219 * allocate the CQ using ib_alloc_cq().
2220 *
2221 * ensure that there are no other contexts that are posting WRs concurrently.
2222 * Otherwise the drain is not guaranteed.
2223 */
2224void ib_drain_rq(struct ib_qp *qp)
2225{
2226 if (qp->device->drain_rq)
2227 qp->device->drain_rq(qp);
2228 else
2229 __ib_drain_rq(qp);
2230}
2231EXPORT_SYMBOL(ib_drain_rq);
2232
2233/**
2234 * ib_drain_qp() - Block until all CQEs have been consumed by the
2235 * application on both the RQ and SQ.
2236 * @qp: queue pair to drain
2237 *
2238 * The caller must:
2239 *
2240 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2241 * and completions.
2242 *
f039f44f 2243 * allocate the CQs using ib_alloc_cq().
2244 *
2245 * ensure that there are no other contexts that are posting WRs concurrently.
2246 * Otherwise the drain is not guaranteed.
2247 */
2248void ib_drain_qp(struct ib_qp *qp)
2249{
2250 ib_drain_sq(qp);
2251 if (!qp->srq)
2252 ib_drain_rq(qp);
2253}
2254EXPORT_SYMBOL(ib_drain_qp);
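/*
 * Usage sketch (illustrative, not from this file): the usual teardown
 * order for a connected QP. Draining before destruction guarantees that
 * no completion handler still references freed context.
 */
static void example_teardown_qp(struct ib_qp *qp)
{
	/* ib_drain_qp() moves the QP to the error state internally. */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}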