/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                           struct ib_recv_wr **bad_wr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_rwq *wq;
        unsigned long flags;
        int ret;

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned) wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                wq = srq->rq.wq;
                next = wq->head + 1;
                if (next >= srq->rq.size)
                        next = 0;
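                /*
                 * One slot is always left unused so that head == tail
                 * means "empty" and head + 1 == tail means "full".
                 */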
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&srq->rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
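                /* The consumer side (see ipath_get_rwqe()) relies on this. */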
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}
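
/*
 * Hypothetical caller sketch (illustration only, not part of this file):
 * a consumer replenishing the SRQ with a single-SGE receive, assuming a
 * buffer already registered under 'lkey'.  'dma_addr', 'buf_len', 'lkey'
 * and 'ctx' are placeholders:
 *
 *      struct ib_sge sge = {
 *              .addr   = (u64) dma_addr,
 *              .length = buf_len,
 *              .lkey   = lkey,
 *      };
 *      struct ib_recv_wr wr = {
 *              .wr_id   = (u64) ctx,
 *              .sg_list = &sge,
 *              .num_sge = 1,
 *      };
 *      struct ib_recv_wr *bad_wr;
 *      int err = ib_post_srq_recv(srq, &wr, &bad_wr);
 *
 * ib_post_srq_recv() reaches this function through the device's
 * post_srq_recv method.
 */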

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libipathverbs when creating a user SRQ
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
                                struct ib_srq_init_attr *srq_init_attr,
                                struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibpd->device);
        struct ipath_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->attr.max_wr == 0) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
            (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
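        /* One extra entry keeps a slot free for the full/empty distinction. */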
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        sz = sizeof(struct ib_sge) * srq->rq.max_sge +
                sizeof(struct ipath_rwqe);
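        /*
         * Each ring entry is a fixed ipath_rwqe header followed by max_sge
         * SGEs; get_rwqe_ptr() computes the same per-entry stride.
         * vmalloc_user() returns zeroed, page-aligned memory that can be
         * mmapped into userspace.
         */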
        srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
        if (!srq->rq.wq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_srq;
        }

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See ipath_mmap() for details.
         */
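        /* A udata buffer large enough for the offset means a userspace SRQ. */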
        if (udata && udata->outlen >= sizeof(__u64)) {
                int err;
                u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;

                srq->ip =
                    ipath_create_mmap_info(dev, s,
                                           ibpd->uobject->context,
                                           srq->rq.wq);
                if (!srq->ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wq;
                }

                err = ib_copy_to_udata(udata, &srq->ip->offset,
                                       sizeof(srq->ip->offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_ip;
                }
        } else
                srq->ip = NULL;

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->rq.wq->head = 0;
        srq->rq.wq->tail = 0;
        srq->limit = srq_init_attr->attr.srq_limit;

        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
                spin_unlock(&dev->n_srqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);

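        /*
         * Queue the ring buffer for mmap(); ipath_mmap() looks it up on
         * this list using the offset returned to userspace above.
         */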
        if (srq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &srq->ibsrq;
        goto done;

bail_ip:
        kfree(srq->ip);
bail_wq:
        vfree(srq->rq.wq);
bail_srq:
        kfree(srq);
done:
        return ret;
}

/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask,
                     struct ib_udata *udata)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_rwq *wq;
        int ret = 0;

        if (attr_mask & IB_SRQ_MAX_WR) {
                struct ipath_rwq *owq;
                struct ipath_rwqe *p;
                u32 sz, size, n, head, tail;

                /* Check that the requested sizes are below the limits. */
                if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
                    ((attr_mask & IB_SRQ_LIMIT) ?
                     attr->srq_limit : srq->limit) > attr->max_wr) {
                        ret = -EINVAL;
                        goto bail;
                }

                sz = sizeof(struct ipath_rwqe) +
                        srq->rq.max_sge * sizeof(struct ib_sge);
                size = attr->max_wr + 1;
                wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
                if (!wq) {
                        ret = -ENOMEM;
                        goto bail;
                }

                /* Check that we can write the offset to mmap. */
                if (udata && udata->inlen >= sizeof(__u64)) {
                        __u64 offset_addr;
                        __u64 offset = 0;

                        ret = ib_copy_from_udata(&offset_addr, udata,
                                                 sizeof(offset_addr));
                        if (ret)
                                goto bail_free;
                        udata->outbuf =
                                (void __user *) (unsigned long) offset_addr;
                        ret = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (ret)
                                goto bail_free;
                }

                spin_lock_irq(&srq->rq.lock);
                /*
                 * Validate the head pointer value and compute
                 * the number of remaining WQEs.
                 */
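                /*
                 * head and tail live in memory that userspace may have
                 * mmapped and scribbled on, so clamp them before use.
                 */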
                owq = srq->rq.wq;
                head = owq->head;
                if (head >= srq->rq.size)
                        head = 0;
                tail = owq->tail;
                if (tail >= srq->rq.size)
                        tail = 0;
                n = head;
                if (n < tail)
                        n += srq->rq.size - tail;
                else
                        n -= tail;
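                /*
                 * n is now the number of WQEs still posted; the new ring
                 * must be strictly larger to hold them plus the spare slot.
                 */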
                if (size <= n) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
                n = 0;
                p = wq->wq;
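                /* Copy the posted WQEs, compacted to the new ring's start. */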
                while (tail != head) {
                        struct ipath_rwqe *wqe;
                        int i;

                        wqe = get_rwqe_ptr(&srq->rq, tail);
                        p->wr_id = wqe->wr_id;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct ipath_rwqe *)((char *) p + sz);
                        if (++tail >= srq->rq.size)
                                tail = 0;
                }
                srq->rq.wq = wq;
                srq->rq.size = size;
                wq->head = n;
                wq->tail = 0;
                if (attr_mask & IB_SRQ_LIMIT)
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);

                vfree(owq);

                if (srq->ip) {
                        struct ipath_mmap_info *ip = srq->ip;
                        struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
                        u32 s = sizeof(struct ipath_rwq) + size * sz;

                        ipath_update_mmap_info(dev, ip, s, wq);

                        /*
                         * Return the offset to mmap.
                         * See ipath_mmap() for details.
                         */
                        if (udata && udata->inlen >= sizeof(__u64)) {
                                ret = ib_copy_to_udata(udata, &ip->offset,
                                                       sizeof(ip->offset));
                                if (ret)
                                        goto bail;
                        }

                        spin_lock_irq(&dev->pending_lock);
                        if (list_empty(&ip->pending_mmaps))
                                list_add(&ip->pending_mmaps,
                                         &dev->pending_mmaps);
                        spin_unlock_irq(&dev->pending_lock);
                }
        } else if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irq(&srq->rq.lock);
                if (attr->srq_limit >= srq->rq.size)
                        ret = -EINVAL;
                else
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);
        }
        goto bail;

bail_unlock:
        spin_unlock_irq(&srq->rq.lock);
bail_free:
        vfree(wq);
bail:
        return ret;
}

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

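        /* size includes the slot reserved to distinguish full from empty. */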
        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_ibdev *dev = to_idev(ibsrq->device);

        spin_lock(&dev->n_srqs_lock);
        dev->n_srqs_allocated--;
        spin_unlock(&dev->n_srqs_lock);
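        /*
         * If the ring was mmapped, drop our reference; the buffer is
         * freed by ipath_release_mmap_info() once the last user is gone.
         * Otherwise we own the buffer and free it here.
         */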
        if (srq->ip)
                kref_put(&srq->ip->ref, ipath_release_mmap_info);
        else
                vfree(srq->rq.wq);
        kfree(srq);

        return 0;
}