/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
48 #include <linux/err.h>
49 #include <linux/slab.h>
50 #include <linux/vmalloc.h>
51 #include <rdma/uverbs_ioctl.h>
57 * rvt_driver_srq_init - init srq resources on a per driver basis
58 * @rdi: rvt dev structure
60 * Do any initialization needed when a driver registers with rdmavt.
62 void rvt_driver_srq_init(struct rvt_dev_info
*rdi
)
64 spin_lock_init(&rdi
->n_srqs_lock
);
65 rdi
->n_srqs_allocated
= 0;
69 * rvt_create_srq - create a shared receive queue
70 * @ibpd: the protection domain of the SRQ to create
71 * @srq_init_attr: the attributes of the SRQ
72 * @udata: data from libibverbs when creating a user SRQ
74 * Return: Allocated srq object
76 struct ib_srq
*rvt_create_srq(struct ib_pd
*ibpd
,
77 struct ib_srq_init_attr
*srq_init_attr
,
78 struct ib_udata
*udata
)
80 struct rvt_dev_info
*dev
= ib_to_rvt(ibpd
->device
);
81 struct rvt_ucontext
*ucontext
= rdma_udata_to_drv_context(
82 udata
, struct rvt_ucontext
, ibucontext
);
87 if (srq_init_attr
->srq_type
!= IB_SRQT_BASIC
)
88 return ERR_PTR(-EOPNOTSUPP
);
90 if (srq_init_attr
->attr
.max_sge
== 0 ||
91 srq_init_attr
->attr
.max_sge
> dev
->dparms
.props
.max_srq_sge
||
92 srq_init_attr
->attr
.max_wr
== 0 ||
93 srq_init_attr
->attr
.max_wr
> dev
->dparms
.props
.max_srq_wr
)
94 return ERR_PTR(-EINVAL
);
96 srq
= kzalloc_node(sizeof(*srq
), GFP_KERNEL
, dev
->dparms
.node
);
98 return ERR_PTR(-ENOMEM
);
101 * Need to use vmalloc() if we want to support large #s of entries.
103 srq
->rq
.size
= srq_init_attr
->attr
.max_wr
+ 1;
104 srq
->rq
.max_sge
= srq_init_attr
->attr
.max_sge
;
105 sz
= sizeof(struct ib_sge
) * srq
->rq
.max_sge
+
106 sizeof(struct rvt_rwqe
);
108 vmalloc_user(sizeof(struct rvt_rwq
) + srq
->rq
.size
* sz
) :
109 vzalloc_node(sizeof(struct rvt_rwq
) + srq
->rq
.size
* sz
,
112 ret
= ERR_PTR(-ENOMEM
);
117 * Return the address of the RWQ as the offset to mmap.
118 * See rvt_mmap() for details.
120 if (udata
&& udata
->outlen
>= sizeof(__u64
)) {
122 u32 s
= sizeof(struct rvt_rwq
) + srq
->rq
.size
* sz
;
125 rvt_create_mmap_info(dev
, s
, &ucontext
->ibucontext
,
128 ret
= ERR_PTR(-ENOMEM
);
132 err
= ib_copy_to_udata(udata
, &srq
->ip
->offset
,
133 sizeof(srq
->ip
->offset
));
141 * ib_create_srq() will initialize srq->ibsrq.
143 spin_lock_init(&srq
->rq
.lock
);
144 srq
->limit
= srq_init_attr
->attr
.srq_limit
;
146 spin_lock(&dev
->n_srqs_lock
);
147 if (dev
->n_srqs_allocated
== dev
->dparms
.props
.max_srq
) {
148 spin_unlock(&dev
->n_srqs_lock
);
149 ret
= ERR_PTR(-ENOMEM
);
153 dev
->n_srqs_allocated
++;
154 spin_unlock(&dev
->n_srqs_lock
);
157 spin_lock_irq(&dev
->pending_lock
);
158 list_add(&srq
->ip
->pending_mmaps
, &dev
->pending_mmaps
);
159 spin_unlock_irq(&dev
->pending_lock
);
174 * rvt_modify_srq - modify a shared receive queue
175 * @ibsrq: the SRQ to modify
176 * @attr: the new attributes of the SRQ
177 * @attr_mask: indicates which attributes to modify
178 * @udata: user data for libibverbs.so
180 * Return: 0 on success
182 int rvt_modify_srq(struct ib_srq
*ibsrq
, struct ib_srq_attr
*attr
,
183 enum ib_srq_attr_mask attr_mask
,
184 struct ib_udata
*udata
)
186 struct rvt_srq
*srq
= ibsrq_to_rvtsrq(ibsrq
);
187 struct rvt_dev_info
*dev
= ib_to_rvt(ibsrq
->device
);
191 if (attr_mask
& IB_SRQ_MAX_WR
) {
194 u32 sz
, size
, n
, head
, tail
;
196 /* Check that the requested sizes are below the limits. */
197 if ((attr
->max_wr
> dev
->dparms
.props
.max_srq_wr
) ||
198 ((attr_mask
& IB_SRQ_LIMIT
) ?
199 attr
->srq_limit
: srq
->limit
) > attr
->max_wr
)
202 sz
= sizeof(struct rvt_rwqe
) +
203 srq
->rq
.max_sge
* sizeof(struct ib_sge
);
204 size
= attr
->max_wr
+ 1;
206 vmalloc_user(sizeof(struct rvt_rwq
) + size
* sz
) :
207 vzalloc_node(sizeof(struct rvt_rwq
) + size
* sz
,
212 /* Check that we can write the offset to mmap. */
213 if (udata
&& udata
->inlen
>= sizeof(__u64
)) {
217 ret
= ib_copy_from_udata(&offset_addr
, udata
,
218 sizeof(offset_addr
));
221 udata
->outbuf
= (void __user
*)
222 (unsigned long)offset_addr
;
223 ret
= ib_copy_to_udata(udata
, &offset
,
229 spin_lock_irq(&srq
->rq
.lock
);
231 * validate head and tail pointer values and compute
232 * the number of remaining WQEs.
237 if (head
>= srq
->rq
.size
|| tail
>= srq
->rq
.size
) {
243 n
+= srq
->rq
.size
- tail
;
252 while (tail
!= head
) {
253 struct rvt_rwqe
*wqe
;
256 wqe
= rvt_get_rwqe_ptr(&srq
->rq
, tail
);
257 p
->wr_id
= wqe
->wr_id
;
258 p
->num_sge
= wqe
->num_sge
;
259 for (i
= 0; i
< wqe
->num_sge
; i
++)
260 p
->sg_list
[i
] = wqe
->sg_list
[i
];
262 p
= (struct rvt_rwqe
*)((char *)p
+ sz
);
263 if (++tail
>= srq
->rq
.size
)
270 if (attr_mask
& IB_SRQ_LIMIT
)
271 srq
->limit
= attr
->srq_limit
;
272 spin_unlock_irq(&srq
->rq
.lock
);
277 struct rvt_mmap_info
*ip
= srq
->ip
;
278 struct rvt_dev_info
*dev
= ib_to_rvt(srq
->ibsrq
.device
);
279 u32 s
= sizeof(struct rvt_rwq
) + size
* sz
;
281 rvt_update_mmap_info(dev
, ip
, s
, wq
);
284 * Return the offset to mmap.
285 * See rvt_mmap() for details.
287 if (udata
&& udata
->inlen
>= sizeof(__u64
)) {
288 ret
= ib_copy_to_udata(udata
, &ip
->offset
,
295 * Put user mapping info onto the pending list
296 * unless it already is on the list.
298 spin_lock_irq(&dev
->pending_lock
);
299 if (list_empty(&ip
->pending_mmaps
))
300 list_add(&ip
->pending_mmaps
,
301 &dev
->pending_mmaps
);
302 spin_unlock_irq(&dev
->pending_lock
);
304 } else if (attr_mask
& IB_SRQ_LIMIT
) {
305 spin_lock_irq(&srq
->rq
.lock
);
306 if (attr
->srq_limit
>= srq
->rq
.size
)
309 srq
->limit
= attr
->srq_limit
;
310 spin_unlock_irq(&srq
->rq
.lock
);
315 spin_unlock_irq(&srq
->rq
.lock
);
321 /** rvt_query_srq - query srq data
322 * @ibsrq: srq to query
323 * @attr: return info in attr
327 int rvt_query_srq(struct ib_srq
*ibsrq
, struct ib_srq_attr
*attr
)
329 struct rvt_srq
*srq
= ibsrq_to_rvtsrq(ibsrq
);
331 attr
->max_wr
= srq
->rq
.size
- 1;
332 attr
->max_sge
= srq
->rq
.max_sge
;
333 attr
->srq_limit
= srq
->limit
;
338 * rvt_destroy_srq - destory an srq
339 * @ibsrq: srq object to destroy
343 int rvt_destroy_srq(struct ib_srq
*ibsrq
)
345 struct rvt_srq
*srq
= ibsrq_to_rvtsrq(ibsrq
);
346 struct rvt_dev_info
*dev
= ib_to_rvt(ibsrq
->device
);
348 spin_lock(&dev
->n_srqs_lock
);
349 dev
->n_srqs_allocated
--;
350 spin_unlock(&dev
->n_srqs_lock
);
352 kref_put(&srq
->ip
->ref
, rvt_release_mmap_info
);