drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
/*
 * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

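/**
 * pvrdma_post_srq_recv - post receive work requests to a shared receive queue
 * @ibsrq: the shared receive queue to post to
 * @wr: the list of receive work requests to post
 * @bad_wr: set to the first failing work request on error
 *
 * Only userspace clients are supported, so this always fails.
 *
 * @return: -EOPNOTSUPP.
 */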
int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	/* No support for kernel clients. */
	return -EOPNOTSUPP;
}

/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}

/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!(pd->uobject && udata)) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

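	/* Bound the number of SRQs by the device cap; the counter is
	 * dropped again on every error path and in pvrdma_free_srq().
	 */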
	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
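	/* The creation reference is dropped in pvrdma_free_srq(), which
	 * then waits on srq->free until all other references are gone.
	 */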
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(pd->uobject->context,
				ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

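	/* The userspace ring buffer must fit in a single page directory. */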
	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
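	/* Publish the SRQ in the handle table; the table has
	 * caps.max_srq entries, hence the modulo. The asynchronous
	 * event path looks SRQs up here by their handle.
	 */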
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy the new SRQ handle back to userspace. */
	if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		/* ibsrq.device is normally filled in by the IB core only
		 * after this function returns; srq was allocated with
		 * kmalloc(), so set it here so that pvrdma_destroy_srq()
		 * can resolve the device.
		 */
		srq->ibsrq.device = pd->device;
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}
217
218 return &srq->ibsrq;
219
220 err_page_dir:
221 pvrdma_page_dir_cleanup(dev, &srq->pdir);
222 err_umem:
223 ib_umem_release(srq->umem);
224 err_srq:
225 kfree(srq);
226 atomic_dec(&dev->num_srqs);
227
228 return ERR_PTR(ret);
229 }
230
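/*
 * pvrdma_free_srq - unlink the SRQ from the handle table, wait for all
 * outstanding references to drop, then release its resources.
 */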
static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	/* Index with the same modulo as the create path. */
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = NULL;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

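	/* Drop the creation reference and wait until any concurrent
	 * users of the SRQ have dropped theirs.
	 */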
	if (refcount_dec_and_test(&srq->refcnt))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	/* There is no support for kernel clients, so this is safe. */
	ib_umem_release(srq->umem);

	pvrdma_page_dir_cleanup(dev, &srq->pdir);

	kfree(srq);

	atomic_dec(&dev->num_srqs);
}

/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

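	/* A device-side failure is only logged; the driver-side state is
	 * torn down regardless, so destroy always reports success.
	 */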
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);

	return 0;
}

/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only modifying the SRQ limit is supported. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);

		return -EINVAL;
	}

	return ret;
}