drivers/infiniband/sw/rxe/rxe_verbs.c
(as of commit "IB: Pass uverbs_attr_bundle down ib_x destroy path")

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

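/* Port attributes are cached in rxe->port; the link speed/width and the
 * physical state are refreshed from the underlying netdev under usdev_lock.
 */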
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
	else
		attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   u32 flags,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(attr, &ah->av);
	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

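/* Copy one ib_recv_wr into the receive queue ring. The ring is a shared
 * producer/consumer queue, so all WQE fields must be written before the
 * producer index is advanced, hence the smp_wmb() below.
 */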
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

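/* Create verbs that return a driver-specific response (uresp) to user space
 * first check that udata->outlen can hold it, so a partial response is never
 * written back.
 */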
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
	struct rxe_srq *srq;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

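/* Validate a send WR before it is copied into the send queue: the SGE count
 * must fit the SQ, atomic operations need a length of at least 8 bytes and an
 * 8-byte aligned remote address, and inline data is bounded by sq->max_inline.
 */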
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

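/* Build a driver send WQE from an ib_send_wr: inline data is copied into the
 * WQE itself, memory-registration WRs return early without an SGE copy, and
 * everything else keeps a copy of the SGE list; wqe->iova selects the remote
 * address for atomics and RDMA read/write.
 */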
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

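/* Kernel post_send path: walk the WR chain, queue each WR and then kick the
 * requester task. If the QP has moved to the error state the completer task
 * is also run so queued work is flushed as completions.
 */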
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

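/* CQ creation rejects any ib_cq_init_attr flags (none are supported) and
 * validates the requested depth before allocating the completion ring.
 */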
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

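/* Arm the CQ for the requested notification type. A solicited-only request
 * never downgrades an existing "next completion" arm, and with
 * IB_CQ_REPORT_MISSED_EVENTS a return value of 1 tells the caller that
 * completions are already pending and should be polled.
 */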
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

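/* Memory region verbs. MRs come from the rxe_mem pool, take a reference on
 * their PD and an index in the device's MR pool; the error paths unwind these
 * in reverse order.
 */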
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

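/* rxe_set_page() is the ib_sg_to_pages() callback used by rxe_map_mr_sg():
 * each page address is stored in the MR's two-level map[]/buf[] table,
 * indexed by mr->nbuf.
 */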
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

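/* Multicast attach/detach. rxe_mcast_get_grp() looks up or creates the group
 * and takes a reference on it; the reference is dropped again once the QP has
 * been linked to the group.
 */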
static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

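/* Register the rxe device with the RDMA core: fill in the static device
 * attributes, derive the node GUID from the netdev MAC address, use
 * dma_virt_ops for software "DMA", advertise the supported uverbs commands,
 * bind the netdev, and allocate the crc32 shash used for ICRC calculation.
 */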
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, ibdev_name);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}