drivers/infiniband/hw/mlx5/qp.c (mirror_ubuntu-hirsute-kernel.git)
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <rdma/ib_umem.h>
35#include "mlx5_ib.h"
36#include "user.h"
37
38/* not supported currently */
39static int wq_signature;
40
41enum {
42 MLX5_IB_ACK_REQ_FREQ = 8,
43};
44
45enum {
46 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
47 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
48 MLX5_IB_LINK_TYPE_IB = 0,
49 MLX5_IB_LINK_TYPE_ETH = 1
50};
51
52enum {
53 MLX5_IB_SQ_STRIDE = 6,
54 MLX5_IB_CACHE_LINE_SIZE = 64,
55};
56
57static const u32 mlx5_ib_opcode[] = {
58 [IB_WR_SEND] = MLX5_OPCODE_SEND,
59 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
60 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
61 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
62 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
63 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
64 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
65 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
66 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
67 [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
68 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
69 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
71};
72
73
74static int is_qp0(enum ib_qp_type qp_type)
75{
76 return qp_type == IB_QPT_SMI;
77}
78
79static int is_qp1(enum ib_qp_type qp_type)
80{
81 return qp_type == IB_QPT_GSI;
82}
83
84static int is_sqp(enum ib_qp_type qp_type)
85{
86 return is_qp0(qp_type) || is_qp1(qp_type);
87}
88
89static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
90{
91 return mlx5_buf_offset(&qp->buf, offset);
92}
93
94static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
95{
96 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
97}
98
99void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
100{
101 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
102}
103
104static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
105{
106 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
107 struct ib_event event;
108
109 if (type == MLX5_EVENT_TYPE_PATH_MIG)
110 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
111
112 if (ibqp->event_handler) {
113 event.device = ibqp->device;
114 event.element.qp = ibqp;
115 switch (type) {
116 case MLX5_EVENT_TYPE_PATH_MIG:
117 event.event = IB_EVENT_PATH_MIG;
118 break;
119 case MLX5_EVENT_TYPE_COMM_EST:
120 event.event = IB_EVENT_COMM_EST;
121 break;
122 case MLX5_EVENT_TYPE_SQ_DRAINED:
123 event.event = IB_EVENT_SQ_DRAINED;
124 break;
125 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
126 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
127 break;
128 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
129 event.event = IB_EVENT_QP_FATAL;
130 break;
131 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
132 event.event = IB_EVENT_PATH_MIG_ERR;
133 break;
134 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
135 event.event = IB_EVENT_QP_REQ_ERR;
136 break;
137 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
138 event.event = IB_EVENT_QP_ACCESS_ERR;
139 break;
140 default:
141 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
142 return;
143 }
144
145 ibqp->event_handler(&event, ibqp->qp_context);
146 }
147}
148
149static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
150 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
151{
152 struct mlx5_general_caps *gen;
153 int wqe_size;
154 int wq_size;
155
156 gen = &dev->mdev->caps.gen;
157 /* Sanity check RQ size before proceeding */
158 if (cap->max_recv_wr > gen->max_wqes)
159 return -EINVAL;
160
161 if (!has_rq) {
162 qp->rq.max_gs = 0;
163 qp->rq.wqe_cnt = 0;
164 qp->rq.wqe_shift = 0;
165 } else {
166 if (ucmd) {
167 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
168 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
169 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
170 qp->rq.max_post = qp->rq.wqe_cnt;
171 } else {
172 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
173 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
174 wqe_size = roundup_pow_of_two(wqe_size);
175 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
176 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
177 qp->rq.wqe_cnt = wq_size / wqe_size;
178 if (wqe_size > gen->max_rq_desc_sz) {
179 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
180 wqe_size,
181 gen->max_rq_desc_sz);
182 return -EINVAL;
183 }
184 qp->rq.wqe_shift = ilog2(wqe_size);
185 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
186 qp->rq.max_post = qp->rq.wqe_cnt;
187 }
188 }
189
190 return 0;
191}
192
193static int sq_overhead(enum ib_qp_type qp_type)
194{
195 int size = 0;
196
197 switch (qp_type) {
198 case IB_QPT_XRC_INI:
199 size += sizeof(struct mlx5_wqe_xrc_seg);
200 /* fall through */
201 case IB_QPT_RC:
202 size += sizeof(struct mlx5_wqe_ctrl_seg) +
203 sizeof(struct mlx5_wqe_atomic_seg) +
204 sizeof(struct mlx5_wqe_raddr_seg);
205 break;
206
207 case IB_QPT_XRC_TGT:
208 return 0;
209
210 case IB_QPT_UC:
211 size += sizeof(struct mlx5_wqe_ctrl_seg) +
212 sizeof(struct mlx5_wqe_raddr_seg) +
213 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
214 sizeof(struct mlx5_mkey_seg);
215 break;
216
217 case IB_QPT_UD:
218 case IB_QPT_SMI:
219 case IB_QPT_GSI:
220 size += sizeof(struct mlx5_wqe_ctrl_seg) +
221 sizeof(struct mlx5_wqe_datagram_seg);
222 break;
223
224 case MLX5_IB_QPT_REG_UMR:
225 size += sizeof(struct mlx5_wqe_ctrl_seg) +
226 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
227 sizeof(struct mlx5_mkey_seg);
228 break;
229
230 default:
231 return -EINVAL;
232 }
233
234 return size;
235}
236
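/*
 * Descriptive note (added): calc_send_wqe() below works out the size of one
 * send WQE for this QP type - the transport overhead from sq_overhead() plus
 * the larger of the scatter/gather segments or the inline-data segment,
 * rounded up to a multiple of MLX5_SEND_WQE_BB (and to at least
 * MLX5_SIG_WQE_SIZE when IB_QP_CREATE_SIGNATURE_EN is requested).
 */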
237static int calc_send_wqe(struct ib_qp_init_attr *attr)
238{
239 int inl_size = 0;
240 int size;
241
242 size = sq_overhead(attr->qp_type);
243 if (size < 0)
244 return size;
245
246 if (attr->cap.max_inline_data) {
247 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
248 attr->cap.max_inline_data;
249 }
250
251 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
252 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
253 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
254 return MLX5_SIG_WQE_SIZE;
255 else
256 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
257}
258
259static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
260 struct mlx5_ib_qp *qp)
261{
262 struct mlx5_general_caps *gen;
263 int wqe_size;
264 int wq_size;
265
266 gen = &dev->mdev->caps.gen;
267 if (!attr->cap.max_send_wr)
268 return 0;
269
270 wqe_size = calc_send_wqe(attr);
271 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
272 if (wqe_size < 0)
273 return wqe_size;
274
275 if (wqe_size > gen->max_sq_desc_sz) {
276 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
277 wqe_size, gen->max_sq_desc_sz);
278 return -EINVAL;
279 }
280
281 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
282 sizeof(struct mlx5_wqe_inline_seg);
283 attr->cap.max_inline_data = qp->max_inline_data;
284
285 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
286 qp->signature_en = true;
287
288 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
289 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
290 if (qp->sq.wqe_cnt > gen->max_wqes) {
291 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
292 qp->sq.wqe_cnt, gen->max_wqes);
293 return -ENOMEM;
294 }
295 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
296 qp->sq.max_gs = attr->cap.max_send_sge;
297 qp->sq.max_post = wq_size / wqe_size;
298 attr->cap.max_send_wr = qp->sq.max_post;
299
300 return wq_size;
301}
302
303static int set_user_buf_size(struct mlx5_ib_dev *dev,
304 struct mlx5_ib_qp *qp,
305 struct mlx5_ib_create_qp *ucmd)
306{
307 struct mlx5_general_caps *gen;
308 int desc_sz = 1 << qp->sq.wqe_shift;
309
310 gen = &dev->mdev->caps.gen;
311 if (desc_sz > gen->max_sq_desc_sz) {
312 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
313 desc_sz, gen->max_sq_desc_sz);
314 return -EINVAL;
315 }
316
317 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
318 mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
319 ucmd->sq_wqe_count);
320 return -EINVAL;
321 }
322
323 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
324
325 if (qp->sq.wqe_cnt > gen->max_wqes) {
326 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
327 qp->sq.wqe_cnt, gen->max_wqes);
328 return -EINVAL;
329 }
330
331 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
332 (qp->sq.wqe_cnt << 6);
333
334 return 0;
335}
336
337static int qp_has_rq(struct ib_qp_init_attr *attr)
338{
339 if (attr->qp_type == IB_QPT_XRC_INI ||
340 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
341 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
342 !attr->cap.max_recv_wr)
343 return 0;
344
345 return 1;
346}
347
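/*
 * Descriptive note (added): UUAR (blue flame register) allocation - index 0
 * is the shared low-latency register, index 2 is handed to the fast-path
 * class (used for the REG_UMR QP), and next_uuar() walks the remaining
 * indices, skipping (n % 4) == 2 and 3, which are split between the medium
 * and high latency classes at the boundary returned by first_hi_uuar().
 */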
348static int first_med_uuar(void)
349{
350 return 1;
351}
352
353static int next_uuar(int n)
354{
355 n++;
356
357 while (((n % 4) & 2))
358 n++;
359
360 return n;
361}
362
363static int num_med_uuar(struct mlx5_uuar_info *uuari)
364{
365 int n;
366
367 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
368 uuari->num_low_latency_uuars - 1;
369
370 return n >= 0 ? n : 0;
371}
372
373static int max_uuari(struct mlx5_uuar_info *uuari)
374{
375 return uuari->num_uars * 4;
376}
377
378static int first_hi_uuar(struct mlx5_uuar_info *uuari)
379{
380 int med;
381 int i;
382 int t;
383
384 med = num_med_uuar(uuari);
385 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
386 t++;
387 if (t == med)
388 return next_uuar(i);
389 }
390
391 return 0;
392}
393
394static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
395{
396 int i;
397
398 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
399 if (!test_bit(i, uuari->bitmap)) {
400 set_bit(i, uuari->bitmap);
401 uuari->count[i]++;
402 return i;
403 }
404 }
405
406 return -ENOMEM;
407}
408
409static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
410{
411 int minidx = first_med_uuar();
412 int i;
413
414 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
415 if (uuari->count[i] < uuari->count[minidx])
416 minidx = i;
417 }
418
419 uuari->count[minidx]++;
420 return minidx;
421}
422
423static int alloc_uuar(struct mlx5_uuar_info *uuari,
424 enum mlx5_ib_latency_class lat)
425{
426 int uuarn = -EINVAL;
427
428 mutex_lock(&uuari->lock);
429 switch (lat) {
430 case MLX5_IB_LATENCY_CLASS_LOW:
431 uuarn = 0;
432 uuari->count[uuarn]++;
433 break;
434
435 case MLX5_IB_LATENCY_CLASS_MEDIUM:
436 if (uuari->ver < 2)
437 uuarn = -ENOMEM;
438 else
439 uuarn = alloc_med_class_uuar(uuari);
440 break;
441
442 case MLX5_IB_LATENCY_CLASS_HIGH:
443 if (uuari->ver < 2)
444 uuarn = -ENOMEM;
445 else
446 uuarn = alloc_high_class_uuar(uuari);
447 break;
448
449 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
450 uuarn = 2;
451 break;
452 }
453 mutex_unlock(&uuari->lock);
454
455 return uuarn;
456}
457
458static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
459{
460 clear_bit(uuarn, uuari->bitmap);
461 --uuari->count[uuarn];
462}
463
464static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
465{
466 clear_bit(uuarn, uuari->bitmap);
467 --uuari->count[uuarn];
468}
469
470static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
471{
472 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
473 int high_uuar = nuuars - uuari->num_low_latency_uuars;
474
475 mutex_lock(&uuari->lock);
476 if (uuarn == 0) {
477 --uuari->count[uuarn];
478 goto out;
479 }
480
481 if (uuarn < high_uuar) {
482 free_med_class_uuar(uuari, uuarn);
483 goto out;
484 }
485
486 free_high_class_uuar(uuari, uuarn);
487
488out:
489 mutex_unlock(&uuari->lock);
490}
491
492static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
493{
494 switch (state) {
495 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
496 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
497 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
498 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
499 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
500 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
501 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
502 default: return -1;
503 }
504}
505
506static int to_mlx5_st(enum ib_qp_type type)
507{
508 switch (type) {
509 case IB_QPT_RC: return MLX5_QP_ST_RC;
510 case IB_QPT_UC: return MLX5_QP_ST_UC;
511 case IB_QPT_UD: return MLX5_QP_ST_UD;
512 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
513 case IB_QPT_XRC_INI:
514 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
515 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
516 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
517 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
518 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
519 case IB_QPT_RAW_PACKET:
520 case IB_QPT_MAX:
521 default: return -EINVAL;
522 }
523}
524
525static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
526{
527 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
528}
529
530static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
531 struct mlx5_ib_qp *qp, struct ib_udata *udata,
532 struct mlx5_create_qp_mbox_in **in,
533 struct mlx5_ib_create_qp_resp *resp, int *inlen)
534{
535 struct mlx5_ib_ucontext *context;
536 struct mlx5_ib_create_qp ucmd;
537 int page_shift = 0;
538 int uar_index;
539 int npages;
540 u32 offset = 0;
541 int uuarn;
542 int ncont = 0;
543 int err;
544
545 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
546 if (err) {
547 mlx5_ib_dbg(dev, "copy failed\n");
548 return err;
549 }
550
551 context = to_mucontext(pd->uobject->context);
552 /*
553 * TBD: should come from the verbs when we have the API
554 */
555 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
556 if (uuarn < 0) {
557 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
558 mlx5_ib_dbg(dev, "reverting to medium latency\n");
559 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
560 if (uuarn < 0) {
561 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
562 mlx5_ib_dbg(dev, "reverting to high latency\n");
563 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
564 if (uuarn < 0) {
565 mlx5_ib_warn(dev, "uuar allocation failed\n");
566 return uuarn;
567 }
568 }
569 }
570
571 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
572 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
573
574 qp->rq.offset = 0;
575 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
576 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
577
578 err = set_user_buf_size(dev, qp, &ucmd);
579 if (err)
580 goto err_uuar;
581
582 if (ucmd.buf_addr && qp->buf_size) {
583 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
584 qp->buf_size, 0, 0);
585 if (IS_ERR(qp->umem)) {
586 mlx5_ib_dbg(dev, "umem_get failed\n");
587 err = PTR_ERR(qp->umem);
588 goto err_uuar;
589 }
590 } else {
591 qp->umem = NULL;
592 }
593
594 if (qp->umem) {
595 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
596 &ncont, NULL);
597 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
598 if (err) {
599 mlx5_ib_warn(dev, "bad offset\n");
600 goto err_umem;
601 }
602 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
603 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
604 }
605
606 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
607 *in = mlx5_vzalloc(*inlen);
608 if (!*in) {
609 err = -ENOMEM;
610 goto err_umem;
611 }
612 if (qp->umem)
613 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
614 (*in)->ctx.log_pg_sz_remote_qpn =
615 cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
616 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
617
618 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
619 resp->uuar_index = uuarn;
620 qp->uuarn = uuarn;
621
622 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
623 if (err) {
624 mlx5_ib_dbg(dev, "map failed\n");
625 goto err_free;
626 }
627
628 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
629 if (err) {
630 mlx5_ib_dbg(dev, "copy failed\n");
631 goto err_unmap;
632 }
633 qp->create_type = MLX5_QP_USER;
634
635 return 0;
636
637err_unmap:
638 mlx5_ib_db_unmap_user(context, &qp->db);
639
640err_free:
641 kvfree(*in);
642
643err_umem:
644 if (qp->umem)
645 ib_umem_release(qp->umem);
646
647err_uuar:
648 free_uuar(&context->uuari, uuarn);
649 return err;
650}
651
652static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
653{
654 struct mlx5_ib_ucontext *context;
655
656 context = to_mucontext(pd->uobject->context);
657 mlx5_ib_db_unmap_user(context, &qp->db);
658 if (qp->umem)
659 ib_umem_release(qp->umem);
660 free_uuar(&context->uuari, qp->uuarn);
661}
662
663static int create_kernel_qp(struct mlx5_ib_dev *dev,
664 struct ib_qp_init_attr *init_attr,
665 struct mlx5_ib_qp *qp,
666 struct mlx5_create_qp_mbox_in **in, int *inlen)
667{
668 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
669 struct mlx5_uuar_info *uuari;
670 int uar_index;
671 int uuarn;
672 int err;
673
674 uuari = &dev->mdev->priv.uuari;
675 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
676 return -EINVAL;
677
678 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
679 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
680
681 uuarn = alloc_uuar(uuari, lc);
682 if (uuarn < 0) {
683 mlx5_ib_dbg(dev, "\n");
684 return -ENOMEM;
685 }
686
687 qp->bf = &uuari->bfs[uuarn];
688 uar_index = qp->bf->uar->index;
689
690 err = calc_sq_size(dev, init_attr, qp);
691 if (err < 0) {
692 mlx5_ib_dbg(dev, "err %d\n", err);
693 goto err_uuar;
694 }
695
696 qp->rq.offset = 0;
697 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
698 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
699
700 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
701 if (err) {
702 mlx5_ib_dbg(dev, "err %d\n", err);
703 goto err_uuar;
704 }
705
706 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
707 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
708 *in = mlx5_vzalloc(*inlen);
709 if (!*in) {
710 err = -ENOMEM;
711 goto err_buf;
712 }
713 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
714 (*in)->ctx.log_pg_sz_remote_qpn =
715 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
716 /* Set "fast registration enabled" for all kernel QPs */
717 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
718 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
719
720 mlx5_fill_page_array(&qp->buf, (*in)->pas);
721
722 err = mlx5_db_alloc(dev->mdev, &qp->db);
723 if (err) {
724 mlx5_ib_dbg(dev, "err %d\n", err);
725 goto err_free;
726 }
727
728 qp->db.db[0] = 0;
729 qp->db.db[1] = 0;
730
731 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
732 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
733 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
734 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
735 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
736
737 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
738 !qp->sq.w_list || !qp->sq.wqe_head) {
739 err = -ENOMEM;
740 goto err_wrid;
741 }
742 qp->create_type = MLX5_QP_KERNEL;
743
744 return 0;
745
746err_wrid:
747 mlx5_db_free(dev->mdev, &qp->db);
748 kfree(qp->sq.wqe_head);
749 kfree(qp->sq.w_list);
750 kfree(qp->sq.wrid);
751 kfree(qp->sq.wr_data);
752 kfree(qp->rq.wrid);
753
754err_free:
755 kvfree(*in);
756
757err_buf:
758 mlx5_buf_free(dev->mdev, &qp->buf);
759
760err_uuar:
761 free_uuar(&dev->mdev->priv.uuari, uuarn);
762 return err;
763}
764
765static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
766{
767 mlx5_db_free(dev->mdev, &qp->db);
768 kfree(qp->sq.wqe_head);
769 kfree(qp->sq.w_list);
770 kfree(qp->sq.wrid);
771 kfree(qp->sq.wr_data);
772 kfree(qp->rq.wrid);
773 mlx5_buf_free(dev->mdev, &qp->buf);
774 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
775}
776
777static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
778{
779 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
780 (attr->qp_type == IB_QPT_XRC_INI))
781 return cpu_to_be32(MLX5_SRQ_RQ);
782 else if (!qp->has_rq)
783 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
784 else
785 return cpu_to_be32(MLX5_NON_ZERO_RQ);
786}
787
788static int is_connected(enum ib_qp_type qp_type)
789{
790 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
791 return 1;
792
793 return 0;
794}
795
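/*
 * Descriptive note (added): create_qp_common() sizes the receive and send
 * queues, builds the CREATE_QP mailbox either from the userspace command
 * (create_user_qp) or from a kernel allocation (create_kernel_qp), fills in
 * the default CQ/SRQ/XRCD resources and finally calls mlx5_core_create_qp().
 */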
796static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
797 struct ib_qp_init_attr *init_attr,
798 struct ib_udata *udata, struct mlx5_ib_qp *qp)
799{
800 struct mlx5_ib_resources *devr = &dev->devr;
801 struct mlx5_ib_create_qp_resp resp;
802 struct mlx5_create_qp_mbox_in *in;
803 struct mlx5_general_caps *gen;
804 struct mlx5_ib_create_qp ucmd;
805 int inlen = sizeof(*in);
806 int err;
807
808 gen = &dev->mdev->caps.gen;
809 mutex_init(&qp->mutex);
810 spin_lock_init(&qp->sq.lock);
811 spin_lock_init(&qp->rq.lock);
812
813 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
814 if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
815 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
816 return -EINVAL;
817 } else {
818 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
819 }
820 }
821
822 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
823 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
824
825 if (pd && pd->uobject) {
826 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
827 mlx5_ib_dbg(dev, "copy failed\n");
828 return -EFAULT;
829 }
830
831 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
832 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
833 } else {
834 qp->wq_sig = !!wq_signature;
835 }
836
837 qp->has_rq = qp_has_rq(init_attr);
838 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
839 qp, (pd && pd->uobject) ? &ucmd : NULL);
840 if (err) {
841 mlx5_ib_dbg(dev, "err %d\n", err);
842 return err;
843 }
844
845 if (pd) {
846 if (pd->uobject) {
847 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
848 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
849 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
850 mlx5_ib_dbg(dev, "invalid rq params\n");
851 return -EINVAL;
852 }
853 if (ucmd.sq_wqe_count > gen->max_wqes) {
854 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
855 ucmd.sq_wqe_count, gen->max_wqes);
856 return -EINVAL;
857 }
858 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
859 if (err)
860 mlx5_ib_dbg(dev, "err %d\n", err);
861 } else {
862 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
863 if (err)
864 mlx5_ib_dbg(dev, "err %d\n", err);
865 else
866 qp->pa_lkey = to_mpd(pd)->pa_lkey;
867 }
868
869 if (err)
870 return err;
871 } else {
872 in = mlx5_vzalloc(sizeof(*in));
873 if (!in)
874 return -ENOMEM;
875
876 qp->create_type = MLX5_QP_EMPTY;
877 }
878
879 if (is_sqp(init_attr->qp_type))
880 qp->port = init_attr->port_num;
881
882 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
883 MLX5_QP_PM_MIGRATED << 11);
884
885 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
886 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
887 else
888 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
889
890 if (qp->wq_sig)
891 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
892
893 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
894 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
895
896 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
897 int rcqe_sz;
898 int scqe_sz;
899
900 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
901 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
902
903 if (rcqe_sz == 128)
904 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
905 else
906 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
907
908 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
909 if (scqe_sz == 128)
910 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
911 else
912 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
913 }
914 }
915
916 if (qp->rq.wqe_cnt) {
917 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
918 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
919 }
920
921 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
922
923 if (qp->sq.wqe_cnt)
924 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
925 else
926 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
927
928 /* Set default resources */
929 switch (init_attr->qp_type) {
930 case IB_QPT_XRC_TGT:
931 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
932 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
933 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
934 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
935 break;
936 case IB_QPT_XRC_INI:
937 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
938 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
939 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
940 break;
941 default:
942 if (init_attr->srq) {
943 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
944 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
945 } else {
946 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
947 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
948 }
949 }
950
951 if (init_attr->send_cq)
952 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
953
954 if (init_attr->recv_cq)
955 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
956
957 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
958
959 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
960 if (err) {
961 mlx5_ib_dbg(dev, "create qp failed\n");
962 goto err_create;
963 }
964
965 kvfree(in);
966 /* Hardware wants QPN written in big-endian order (after
967 * shifting) for send doorbell. Precompute this value to save
968 * a little bit when posting sends.
969 */
970 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
971
972 qp->mqp.event = mlx5_ib_qp_event;
973
974 return 0;
975
976err_create:
977 if (qp->create_type == MLX5_QP_USER)
978 destroy_qp_user(pd, qp);
979 else if (qp->create_type == MLX5_QP_KERNEL)
980 destroy_qp_kernel(dev, qp);
981
982 kvfree(in);
983 return err;
984}
985
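/*
 * Descriptive note (added): mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() take the
 * two CQ locks in a fixed order (lower CQN first) so that QP cleanup cannot
 * deadlock against another path locking the same pair in the opposite order;
 * the __acquire/__release annotations keep sparse balanced when one CQ is
 * absent or both pointers refer to the same CQ.
 */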
986static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
987 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
988{
989 if (send_cq) {
990 if (recv_cq) {
991 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
992 spin_lock_irq(&send_cq->lock);
993 spin_lock_nested(&recv_cq->lock,
994 SINGLE_DEPTH_NESTING);
995 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
996 spin_lock_irq(&send_cq->lock);
997 __acquire(&recv_cq->lock);
998 } else {
999 spin_lock_irq(&recv_cq->lock);
1000 spin_lock_nested(&send_cq->lock,
1001 SINGLE_DEPTH_NESTING);
1002 }
1003 } else {
1004 spin_lock_irq(&send_cq->lock);
1005 __acquire(&recv_cq->lock);
1006 }
1007 } else if (recv_cq) {
1008 spin_lock_irq(&recv_cq->lock);
1009 __acquire(&send_cq->lock);
1010 } else {
1011 __acquire(&send_cq->lock);
1012 __acquire(&recv_cq->lock);
1013 }
1014}
1015
1016static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1017 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1018{
1019 if (send_cq) {
1020 if (recv_cq) {
1021 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1022 spin_unlock(&recv_cq->lock);
1023 spin_unlock_irq(&send_cq->lock);
1024 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1025 __release(&recv_cq->lock);
1026 spin_unlock_irq(&send_cq->lock);
1027 } else {
1028 spin_unlock(&send_cq->lock);
1029 spin_unlock_irq(&recv_cq->lock);
1030 }
1031 } else {
1032 __release(&recv_cq->lock);
1033 spin_unlock_irq(&send_cq->lock);
1034 }
1035 } else if (recv_cq) {
1036 __release(&send_cq->lock);
1037 spin_unlock_irq(&recv_cq->lock);
1038 } else {
1039 __release(&recv_cq->lock);
1040 __release(&send_cq->lock);
1041 }
1042}
1043
1044static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1045{
1046 return to_mpd(qp->ibqp.pd);
1047}
1048
1049static void get_cqs(struct mlx5_ib_qp *qp,
1050 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
1051{
1052 switch (qp->ibqp.qp_type) {
1053 case IB_QPT_XRC_TGT:
1054 *send_cq = NULL;
1055 *recv_cq = NULL;
1056 break;
1057 case MLX5_IB_QPT_REG_UMR:
1058 case IB_QPT_XRC_INI:
1059 *send_cq = to_mcq(qp->ibqp.send_cq);
1060 *recv_cq = NULL;
1061 break;
1062
1063 case IB_QPT_SMI:
1064 case IB_QPT_GSI:
1065 case IB_QPT_RC:
1066 case IB_QPT_UC:
1067 case IB_QPT_UD:
1068 case IB_QPT_RAW_IPV6:
1069 case IB_QPT_RAW_ETHERTYPE:
1070 *send_cq = to_mcq(qp->ibqp.send_cq);
1071 *recv_cq = to_mcq(qp->ibqp.recv_cq);
1072 break;
1073
1074 case IB_QPT_RAW_PACKET:
1075 case IB_QPT_MAX:
1076 default:
1077 *send_cq = NULL;
1078 *recv_cq = NULL;
1079 break;
1080 }
1081}
1082
1083static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1084{
1085 struct mlx5_ib_cq *send_cq, *recv_cq;
1086 struct mlx5_modify_qp_mbox_in *in;
1087 int err;
1088
1089 in = kzalloc(sizeof(*in), GFP_KERNEL);
1090 if (!in)
1091 return;
1092 if (qp->state != IB_QPS_RESET)
1093 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
1094 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
1095 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1096 qp->mqp.qpn);
1097
1098 get_cqs(qp, &send_cq, &recv_cq);
1099
1100 if (qp->create_type == MLX5_QP_KERNEL) {
1101 mlx5_ib_lock_cqs(send_cq, recv_cq);
1102 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1103 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1104 if (send_cq != recv_cq)
1105 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1106 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1107 }
1108
1109 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
1110 if (err)
1111 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1112 kfree(in);
1113
1114
1115 if (qp->create_type == MLX5_QP_KERNEL)
1116 destroy_qp_kernel(dev, qp);
1117 else if (qp->create_type == MLX5_QP_USER)
1118 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1119}
1120
1121static const char *ib_qp_type_str(enum ib_qp_type type)
1122{
1123 switch (type) {
1124 case IB_QPT_SMI:
1125 return "IB_QPT_SMI";
1126 case IB_QPT_GSI:
1127 return "IB_QPT_GSI";
1128 case IB_QPT_RC:
1129 return "IB_QPT_RC";
1130 case IB_QPT_UC:
1131 return "IB_QPT_UC";
1132 case IB_QPT_UD:
1133 return "IB_QPT_UD";
1134 case IB_QPT_RAW_IPV6:
1135 return "IB_QPT_RAW_IPV6";
1136 case IB_QPT_RAW_ETHERTYPE:
1137 return "IB_QPT_RAW_ETHERTYPE";
1138 case IB_QPT_XRC_INI:
1139 return "IB_QPT_XRC_INI";
1140 case IB_QPT_XRC_TGT:
1141 return "IB_QPT_XRC_TGT";
1142 case IB_QPT_RAW_PACKET:
1143 return "IB_QPT_RAW_PACKET";
1144 case MLX5_IB_QPT_REG_UMR:
1145 return "MLX5_IB_QPT_REG_UMR";
1146 case IB_QPT_MAX:
1147 default:
1148 return "Invalid QP type";
1149 }
1150}
1151
1152struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1153 struct ib_qp_init_attr *init_attr,
1154 struct ib_udata *udata)
1155{
1156 struct mlx5_general_caps *gen;
1157 struct mlx5_ib_dev *dev;
1158 struct mlx5_ib_qp *qp;
1159 u16 xrcdn = 0;
1160 int err;
1161
1162 if (pd) {
1163 dev = to_mdev(pd->device);
1164 } else {
1165 /* being cautious here */
1166 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1167 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1168 pr_warn("%s: no PD for transport %s\n", __func__,
1169 ib_qp_type_str(init_attr->qp_type));
1170 return ERR_PTR(-EINVAL);
1171 }
1172 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1173 }
1174 gen = &dev->mdev->caps.gen;
1175
1176 switch (init_attr->qp_type) {
1177 case IB_QPT_XRC_TGT:
1178 case IB_QPT_XRC_INI:
1179 if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
1180 mlx5_ib_dbg(dev, "XRC not supported\n");
1181 return ERR_PTR(-ENOSYS);
1182 }
1183 init_attr->recv_cq = NULL;
1184 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1185 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1186 init_attr->send_cq = NULL;
1187 }
1188
1189 /* fall through */
1190 case IB_QPT_RC:
1191 case IB_QPT_UC:
1192 case IB_QPT_UD:
1193 case IB_QPT_SMI:
1194 case IB_QPT_GSI:
1195 case MLX5_IB_QPT_REG_UMR:
1196 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1197 if (!qp)
1198 return ERR_PTR(-ENOMEM);
1199
1200 err = create_qp_common(dev, pd, init_attr, udata, qp);
1201 if (err) {
1202 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1203 kfree(qp);
1204 return ERR_PTR(err);
1205 }
1206
1207 if (is_qp0(init_attr->qp_type))
1208 qp->ibqp.qp_num = 0;
1209 else if (is_qp1(init_attr->qp_type))
1210 qp->ibqp.qp_num = 1;
1211 else
1212 qp->ibqp.qp_num = qp->mqp.qpn;
1213
1214 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1215 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1216 to_mcq(init_attr->send_cq)->mcq.cqn);
1217
1218 qp->xrcdn = xrcdn;
1219
1220 break;
1221
1222 case IB_QPT_RAW_IPV6:
1223 case IB_QPT_RAW_ETHERTYPE:
1224 case IB_QPT_RAW_PACKET:
1225 case IB_QPT_MAX:
1226 default:
1227 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1228 init_attr->qp_type);
1229 /* Don't support raw QPs */
1230 return ERR_PTR(-EINVAL);
1231 }
1232
1233 return &qp->ibqp;
1234}
1235
1236int mlx5_ib_destroy_qp(struct ib_qp *qp)
1237{
1238 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1239 struct mlx5_ib_qp *mqp = to_mqp(qp);
1240
1241 destroy_qp_common(dev, mqp);
1242
1243 kfree(mqp);
1244
1245 return 0;
1246}
1247
1248static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1249 int attr_mask)
1250{
1251 u32 hw_access_flags = 0;
1252 u8 dest_rd_atomic;
1253 u32 access_flags;
1254
1255 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1256 dest_rd_atomic = attr->max_dest_rd_atomic;
1257 else
1258 dest_rd_atomic = qp->resp_depth;
1259
1260 if (attr_mask & IB_QP_ACCESS_FLAGS)
1261 access_flags = attr->qp_access_flags;
1262 else
1263 access_flags = qp->atomic_rd_en;
1264
1265 if (!dest_rd_atomic)
1266 access_flags &= IB_ACCESS_REMOTE_WRITE;
1267
1268 if (access_flags & IB_ACCESS_REMOTE_READ)
1269 hw_access_flags |= MLX5_QP_BIT_RRE;
1270 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1271 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1272 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1273 hw_access_flags |= MLX5_QP_BIT_RWE;
1274
1275 return cpu_to_be32(hw_access_flags);
1276}
1277
1278enum {
1279 MLX5_PATH_FLAG_FL = 1 << 0,
1280 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1281 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1282};
1283
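/*
 * Descriptive note (added): ib_rate_to_mlx5() converts an IB static rate into
 * the device encoding - IB_RATE_PORT_CURRENT maps to 0 (no static rate),
 * out-of-range values are rejected, and otherwise the rate is stepped down
 * until it matches a bit in stat_rate_support, then offset by
 * MLX5_STAT_RATE_OFFSET.
 */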
1284static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1285{
1286 struct mlx5_general_caps *gen;
1287
1288 gen = &dev->mdev->caps.gen;
1289 if (rate == IB_RATE_PORT_CURRENT) {
1290 return 0;
1291 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1292 return -EINVAL;
1293 } else {
1294 while (rate != IB_RATE_2_5_GBPS &&
1295 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1296 gen->stat_rate_support))
1297 --rate;
1298 }
1299
1300 return rate + MLX5_STAT_RATE_OFFSET;
1301}
1302
1303static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1304 struct mlx5_qp_path *path, u8 port, int attr_mask,
1305 u32 path_flags, const struct ib_qp_attr *attr)
1306{
1307 struct mlx5_general_caps *gen;
1308 int err;
1309
1310 gen = &dev->mdev->caps.gen;
1311 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1312 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
1313
1314 if (attr_mask & IB_QP_PKEY_INDEX)
1315 path->pkey_index = attr->pkey_index;
1316
1317 path->grh_mlid = ah->src_path_bits & 0x7f;
1318 path->rlid = cpu_to_be16(ah->dlid);
1319
1320 if (ah->ah_flags & IB_AH_GRH) {
1321 if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
1322 pr_err("sgid_index (%u) too large. max is %d\n",
1323 ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
1324 return -EINVAL;
1325 }
1326 path->grh_mlid |= 1 << 7;
1327 path->mgid_index = ah->grh.sgid_index;
1328 path->hop_limit = ah->grh.hop_limit;
1329 path->tclass_flowlabel =
1330 cpu_to_be32((ah->grh.traffic_class << 20) |
1331 (ah->grh.flow_label));
1332 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1333 }
1334
1335 err = ib_rate_to_mlx5(dev, ah->static_rate);
1336 if (err < 0)
1337 return err;
1338 path->static_rate = err;
1339 path->port = port;
1340
1341 if (attr_mask & IB_QP_TIMEOUT)
1342 path->ackto_lt = attr->timeout << 3;
1343
1344 path->sl = ah->sl & 0xf;
1345
1346 return 0;
1347}
1348
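/*
 * Descriptive note (added): opt_mask[cur_state][new_state][service_type]
 * lists the optional QP-context parameters that may be modified on each state
 * transition; __mlx5_ib_modify_qp() ANDs the translated attribute mask
 * against this table before issuing the modify command.
 */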
1349static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1350 [MLX5_QP_STATE_INIT] = {
1351 [MLX5_QP_STATE_INIT] = {
1352 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1353 MLX5_QP_OPTPAR_RAE |
1354 MLX5_QP_OPTPAR_RWE |
1355 MLX5_QP_OPTPAR_PKEY_INDEX |
1356 MLX5_QP_OPTPAR_PRI_PORT,
1357 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1358 MLX5_QP_OPTPAR_PKEY_INDEX |
1359 MLX5_QP_OPTPAR_PRI_PORT,
1360 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1361 MLX5_QP_OPTPAR_Q_KEY |
1362 MLX5_QP_OPTPAR_PRI_PORT,
1363 },
1364 [MLX5_QP_STATE_RTR] = {
1365 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1366 MLX5_QP_OPTPAR_RRE |
1367 MLX5_QP_OPTPAR_RAE |
1368 MLX5_QP_OPTPAR_RWE |
1369 MLX5_QP_OPTPAR_PKEY_INDEX,
1370 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1371 MLX5_QP_OPTPAR_RWE |
1372 MLX5_QP_OPTPAR_PKEY_INDEX,
1373 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1374 MLX5_QP_OPTPAR_Q_KEY,
1375 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1376 MLX5_QP_OPTPAR_Q_KEY,
1377 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1378 MLX5_QP_OPTPAR_RRE |
1379 MLX5_QP_OPTPAR_RAE |
1380 MLX5_QP_OPTPAR_RWE |
1381 MLX5_QP_OPTPAR_PKEY_INDEX,
1382 },
1383 },
1384 [MLX5_QP_STATE_RTR] = {
1385 [MLX5_QP_STATE_RTS] = {
1386 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1387 MLX5_QP_OPTPAR_RRE |
1388 MLX5_QP_OPTPAR_RAE |
1389 MLX5_QP_OPTPAR_RWE |
1390 MLX5_QP_OPTPAR_PM_STATE |
1391 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1392 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1393 MLX5_QP_OPTPAR_RWE |
1394 MLX5_QP_OPTPAR_PM_STATE,
1395 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1396 },
1397 },
1398 [MLX5_QP_STATE_RTS] = {
1399 [MLX5_QP_STATE_RTS] = {
1400 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1401 MLX5_QP_OPTPAR_RAE |
1402 MLX5_QP_OPTPAR_RWE |
1403 MLX5_QP_OPTPAR_RNR_TIMEOUT |
1404 MLX5_QP_OPTPAR_PM_STATE |
1405 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1406 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1407 MLX5_QP_OPTPAR_PM_STATE |
1408 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1409 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1410 MLX5_QP_OPTPAR_SRQN |
1411 MLX5_QP_OPTPAR_CQN_RCV,
1412 },
1413 },
1414 [MLX5_QP_STATE_SQER] = {
1415 [MLX5_QP_STATE_RTS] = {
1416 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1417 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1418 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1419 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1420 MLX5_QP_OPTPAR_RWE |
1421 MLX5_QP_OPTPAR_RAE |
1422 MLX5_QP_OPTPAR_RRE,
1423 },
1424 },
1425};
1426
1427static int ib_nr_to_mlx5_nr(int ib_mask)
1428{
1429 switch (ib_mask) {
1430 case IB_QP_STATE:
1431 return 0;
1432 case IB_QP_CUR_STATE:
1433 return 0;
1434 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1435 return 0;
1436 case IB_QP_ACCESS_FLAGS:
1437 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1438 MLX5_QP_OPTPAR_RAE;
1439 case IB_QP_PKEY_INDEX:
1440 return MLX5_QP_OPTPAR_PKEY_INDEX;
1441 case IB_QP_PORT:
1442 return MLX5_QP_OPTPAR_PRI_PORT;
1443 case IB_QP_QKEY:
1444 return MLX5_QP_OPTPAR_Q_KEY;
1445 case IB_QP_AV:
1446 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1447 MLX5_QP_OPTPAR_PRI_PORT;
1448 case IB_QP_PATH_MTU:
1449 return 0;
1450 case IB_QP_TIMEOUT:
1451 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1452 case IB_QP_RETRY_CNT:
1453 return MLX5_QP_OPTPAR_RETRY_COUNT;
1454 case IB_QP_RNR_RETRY:
1455 return MLX5_QP_OPTPAR_RNR_RETRY;
1456 case IB_QP_RQ_PSN:
1457 return 0;
1458 case IB_QP_MAX_QP_RD_ATOMIC:
1459 return MLX5_QP_OPTPAR_SRA_MAX;
1460 case IB_QP_ALT_PATH:
1461 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1462 case IB_QP_MIN_RNR_TIMER:
1463 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1464 case IB_QP_SQ_PSN:
1465 return 0;
1466 case IB_QP_MAX_DEST_RD_ATOMIC:
1467 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1468 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1469 case IB_QP_PATH_MIG_STATE:
1470 return MLX5_QP_OPTPAR_PM_STATE;
1471 case IB_QP_CAP:
1472 return 0;
1473 case IB_QP_DEST_QPN:
1474 return 0;
1475 }
1476 return 0;
1477}
1478
1479static int ib_mask_to_mlx5_opt(int ib_mask)
1480{
1481 int result = 0;
1482 int i;
1483
1484 for (i = 0; i < 8 * sizeof(int); i++) {
1485 if ((1 << i) & ib_mask)
1486 result |= ib_nr_to_mlx5_nr(1 << i);
1487 }
1488
1489 return result;
1490}
1491
1492static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1493 const struct ib_qp_attr *attr, int attr_mask,
1494 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1495{
1496 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1497 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1498 struct mlx5_ib_cq *send_cq, *recv_cq;
1499 struct mlx5_qp_context *context;
1500 struct mlx5_general_caps *gen;
1501 struct mlx5_modify_qp_mbox_in *in;
1502 struct mlx5_ib_pd *pd;
1503 enum mlx5_qp_state mlx5_cur, mlx5_new;
1504 enum mlx5_qp_optpar optpar;
1505 int sqd_event;
1506 int mlx5_st;
1507 int err;
1508
1509 gen = &dev->mdev->caps.gen;
1510 in = kzalloc(sizeof(*in), GFP_KERNEL);
1511 if (!in)
1512 return -ENOMEM;
1513
1514 context = &in->ctx;
1515 err = to_mlx5_st(ibqp->qp_type);
1516 if (err < 0)
1517 goto out;
1518
1519 context->flags = cpu_to_be32(err << 16);
1520
1521 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1522 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1523 } else {
1524 switch (attr->path_mig_state) {
1525 case IB_MIG_MIGRATED:
1526 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1527 break;
1528 case IB_MIG_REARM:
1529 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1530 break;
1531 case IB_MIG_ARMED:
1532 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1533 break;
1534 }
1535 }
1536
1537 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1538 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1539 } else if (ibqp->qp_type == IB_QPT_UD ||
1540 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1541 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1542 } else if (attr_mask & IB_QP_PATH_MTU) {
1543 if (attr->path_mtu < IB_MTU_256 ||
1544 attr->path_mtu > IB_MTU_4096) {
1545 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1546 err = -EINVAL;
1547 goto out;
1548 }
1549 context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
1550 }
1551
1552 if (attr_mask & IB_QP_DEST_QPN)
1553 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1554
1555 if (attr_mask & IB_QP_PKEY_INDEX)
1556 context->pri_path.pkey_index = attr->pkey_index;
1557
1558 /* todo implement counter_index functionality */
1559
1560 if (is_sqp(ibqp->qp_type))
1561 context->pri_path.port = qp->port;
1562
1563 if (attr_mask & IB_QP_PORT)
1564 context->pri_path.port = attr->port_num;
1565
1566 if (attr_mask & IB_QP_AV) {
1567 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1568 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1569 attr_mask, 0, attr);
1570 if (err)
1571 goto out;
1572 }
1573
1574 if (attr_mask & IB_QP_TIMEOUT)
1575 context->pri_path.ackto_lt |= attr->timeout << 3;
1576
1577 if (attr_mask & IB_QP_ALT_PATH) {
1578 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1579 attr->alt_port_num, attr_mask, 0, attr);
1580 if (err)
1581 goto out;
1582 }
1583
1584 pd = get_pd(qp);
1585 get_cqs(qp, &send_cq, &recv_cq);
1586
1587 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1588 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1589 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1590 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1591
1592 if (attr_mask & IB_QP_RNR_RETRY)
1593 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1594
1595 if (attr_mask & IB_QP_RETRY_CNT)
1596 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1597
1598 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1599 if (attr->max_rd_atomic)
1600 context->params1 |=
1601 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1602 }
1603
1604 if (attr_mask & IB_QP_SQ_PSN)
1605 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1606
1607 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1608 if (attr->max_dest_rd_atomic)
1609 context->params2 |=
1610 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1611 }
1612
1613 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1614 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1615
1616 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1617 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1618
1619 if (attr_mask & IB_QP_RQ_PSN)
1620 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1621
1622 if (attr_mask & IB_QP_QKEY)
1623 context->qkey = cpu_to_be32(attr->qkey);
1624
1625 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1626 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1627
1628 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1629 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1630 sqd_event = 1;
1631 else
1632 sqd_event = 0;
1633
1634 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1635 context->sq_crq_size |= cpu_to_be16(1 << 4);
1636
1637
1638 mlx5_cur = to_mlx5_state(cur_state);
1639 mlx5_new = to_mlx5_state(new_state);
1640 mlx5_st = to_mlx5_st(ibqp->qp_type);
1641 if (mlx5_st < 0)
1642 goto out;
1643
1644 optpar = ib_mask_to_mlx5_opt(attr_mask);
1645 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1646 in->optparam = cpu_to_be32(optpar);
1647 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1648 to_mlx5_state(new_state), in, sqd_event,
1649 &qp->mqp);
1650 if (err)
1651 goto out;
1652
1653 qp->state = new_state;
1654
1655 if (attr_mask & IB_QP_ACCESS_FLAGS)
1656 qp->atomic_rd_en = attr->qp_access_flags;
1657 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1658 qp->resp_depth = attr->max_dest_rd_atomic;
1659 if (attr_mask & IB_QP_PORT)
1660 qp->port = attr->port_num;
1661 if (attr_mask & IB_QP_ALT_PATH)
1662 qp->alt_port = attr->alt_port_num;
1663
1664 /*
1665 * If we moved a kernel QP to RESET, clean up all old CQ
1666 * entries and reinitialize the QP.
1667 */
1668 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1669 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1670 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1671 if (send_cq != recv_cq)
1672 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1673
1674 qp->rq.head = 0;
1675 qp->rq.tail = 0;
1676 qp->sq.head = 0;
1677 qp->sq.tail = 0;
1678 qp->sq.cur_post = 0;
1679 qp->sq.last_poll = 0;
1680 qp->db.db[MLX5_RCV_DBR] = 0;
1681 qp->db.db[MLX5_SND_DBR] = 0;
1682 }
1683
1684out:
1685 kfree(in);
1686 return err;
1687}
1688
1689int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1690 int attr_mask, struct ib_udata *udata)
1691{
1692 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1693 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1694 enum ib_qp_state cur_state, new_state;
1695 struct mlx5_general_caps *gen;
1696 int err = -EINVAL;
1697 int port;
1698
1699 gen = &dev->mdev->caps.gen;
1700 mutex_lock(&qp->mutex);
1701
1702 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1703 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1704
1705 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1706 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1707 IB_LINK_LAYER_UNSPECIFIED))
1708 goto out;
1709
1710 if ((attr_mask & IB_QP_PORT) &&
1711 (attr->port_num == 0 || attr->port_num > gen->num_ports))
1712 goto out;
1713
1714 if (attr_mask & IB_QP_PKEY_INDEX) {
1715 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1716 if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
1717 goto out;
1718 }
1719
1720 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1721 attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
1722 goto out;
1723
1724 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1725 attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
1726 goto out;
1727
1728 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1729 err = 0;
1730 goto out;
1731 }
1732
1733 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1734
1735out:
1736 mutex_unlock(&qp->mutex);
1737 return err;
1738}
1739
1740static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1741{
1742 struct mlx5_ib_cq *cq;
1743 unsigned cur;
1744
1745 cur = wq->head - wq->tail;
1746 if (likely(cur + nreq < wq->max_post))
1747 return 0;
1748
1749 cq = to_mcq(ib_cq);
1750 spin_lock(&cq->lock);
1751 cur = wq->head - wq->tail;
1752 spin_unlock(&cq->lock);
1753
1754 return cur + nreq >= wq->max_post;
1755}
1756
1757static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1758 u64 remote_addr, u32 rkey)
1759{
1760 rseg->raddr = cpu_to_be64(remote_addr);
1761 rseg->rkey = cpu_to_be32(rkey);
1762 rseg->reserved = 0;
1763}
1764
1765static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1766 struct ib_send_wr *wr)
1767{
1768 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
1769 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
1770 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1771}
1772
1773static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1774{
1775 dseg->byte_count = cpu_to_be32(sg->length);
1776 dseg->lkey = cpu_to_be32(sg->lkey);
1777 dseg->addr = cpu_to_be64(sg->addr);
1778}
1779
1780static __be16 get_klm_octo(int npages)
1781{
1782 return cpu_to_be16(ALIGN(npages, 8) / 2);
1783}
1784
1785static __be64 frwr_mkey_mask(void)
1786{
1787 u64 result;
1788
1789 result = MLX5_MKEY_MASK_LEN |
1790 MLX5_MKEY_MASK_PAGE_SIZE |
1791 MLX5_MKEY_MASK_START_ADDR |
1792 MLX5_MKEY_MASK_EN_RINVAL |
1793 MLX5_MKEY_MASK_KEY |
1794 MLX5_MKEY_MASK_LR |
1795 MLX5_MKEY_MASK_LW |
1796 MLX5_MKEY_MASK_RR |
1797 MLX5_MKEY_MASK_RW |
1798 MLX5_MKEY_MASK_A |
1799 MLX5_MKEY_MASK_SMALL_FENCE |
1800 MLX5_MKEY_MASK_FREE;
1801
1802 return cpu_to_be64(result);
1803}
1804
1805static __be64 sig_mkey_mask(void)
1806{
1807 u64 result;
1808
1809 result = MLX5_MKEY_MASK_LEN |
1810 MLX5_MKEY_MASK_PAGE_SIZE |
1811 MLX5_MKEY_MASK_START_ADDR |
1812 MLX5_MKEY_MASK_EN_SIGERR |
1813 MLX5_MKEY_MASK_EN_RINVAL |
1814 MLX5_MKEY_MASK_KEY |
1815 MLX5_MKEY_MASK_LR |
1816 MLX5_MKEY_MASK_LW |
1817 MLX5_MKEY_MASK_RR |
1818 MLX5_MKEY_MASK_RW |
1819 MLX5_MKEY_MASK_SMALL_FENCE |
1820 MLX5_MKEY_MASK_FREE |
1821 MLX5_MKEY_MASK_BSF_EN;
1822
1823 return cpu_to_be64(result);
1824}
1825
1826static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1827 struct ib_send_wr *wr, int li)
1828{
1829 memset(umr, 0, sizeof(*umr));
1830
1831 if (li) {
1832 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1833 umr->flags = 1 << 7;
1834 return;
1835 }
1836
1837 umr->flags = (1 << 5); /* fail if not free */
1838 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
1839 umr->mkey_mask = frwr_mkey_mask();
1840}
1841
1842static __be64 get_umr_reg_mr_mask(void)
1843{
1844 u64 result;
1845
1846 result = MLX5_MKEY_MASK_LEN |
1847 MLX5_MKEY_MASK_PAGE_SIZE |
1848 MLX5_MKEY_MASK_START_ADDR |
1849 MLX5_MKEY_MASK_PD |
1850 MLX5_MKEY_MASK_LR |
1851 MLX5_MKEY_MASK_LW |
1852 MLX5_MKEY_MASK_KEY |
1853 MLX5_MKEY_MASK_RR |
1854 MLX5_MKEY_MASK_RW |
1855 MLX5_MKEY_MASK_A |
1856 MLX5_MKEY_MASK_FREE;
1857
1858 return cpu_to_be64(result);
1859}
1860
1861static __be64 get_umr_unreg_mr_mask(void)
1862{
1863 u64 result;
1864
1865 result = MLX5_MKEY_MASK_FREE;
1866
1867 return cpu_to_be64(result);
1868}
1869
1870static __be64 get_umr_update_mtt_mask(void)
1871{
1872 u64 result;
1873
1874 result = MLX5_MKEY_MASK_FREE;
1875
1876 return cpu_to_be64(result);
1877}
1878
1879static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1880 struct ib_send_wr *wr)
1881{
1882 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
1883
1884 memset(umr, 0, sizeof(*umr));
1885
1886 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
1887 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
1888 else
1889 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
1890
1891 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1892 umr->klm_octowords = get_klm_octo(umrwr->npages);
1893 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
1894 umr->mkey_mask = get_umr_update_mtt_mask();
1895 umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
1896 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
1897 } else {
1898 umr->mkey_mask = get_umr_reg_mr_mask();
1899 }
1900 } else {
1901 umr->mkey_mask = get_umr_unreg_mr_mask();
1902 }
1903
1904 if (!wr->num_sge)
1905 umr->flags |= MLX5_UMR_INLINE;
1906}
1907
1908static u8 get_umr_flags(int acc)
1909{
1910 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
1911 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
1912 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
1913 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
2ac45934 1914 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
e126ba97
EC
1915}
1916
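/*
 * Editor's note: fill the mkey context for a fast-register or
 * local-invalidate WR.  The low byte of the rkey is placed in qpn_mkey7_0,
 * remote invalidation is enabled, and xlt_oct_size rounds the page list up
 * to 16-byte octowords (two 8-byte page entries per octoword).
 */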
1917static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1918 int li, int *writ)
1919{
1920 memset(seg, 0, sizeof(*seg));
1921 if (li) {
968e78dd 1922 seg->status = MLX5_MKEY_STATUS_FREE;
e126ba97
EC
1923 return;
1924 }
1925
2ac45934
SG
1926 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
1927 MLX5_ACCESS_MODE_MTT;
e126ba97
EC
1928 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
1929 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
1930 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
1931 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1932 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1933 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
1934 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1935}
1936
1937static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
1938{
968e78dd
HE
1939 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
1940
e126ba97
EC
1941 memset(seg, 0, sizeof(*seg));
1942 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
968e78dd 1943 seg->status = MLX5_MKEY_STATUS_FREE;
e126ba97
EC
1944 return;
1945 }
1946
968e78dd
HE
1947 seg->flags = convert_access(umrwr->access_flags);
1948 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
1949 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
1950 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
1951 }
1952 seg->len = cpu_to_be64(umrwr->length);
1953 seg->log2_page_size = umrwr->page_shift;
746b5583 1954 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
968e78dd 1955 mlx5_mkey_variant(umrwr->mkey));
e126ba97
EC
1956}
1957
1958static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
1959 struct ib_send_wr *wr,
1960 struct mlx5_core_dev *mdev,
1961 struct mlx5_ib_pd *pd,
1962 int writ)
1963{
1964 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
1965 u64 *page_list = wr->wr.fast_reg.page_list->page_list;
1966 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
1967 int i;
1968
1969 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
1970 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
1971 dseg->addr = cpu_to_be64(mfrpl->map);
1972 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
1973 dseg->lkey = cpu_to_be32(pd->pa_lkey);
1974}
1975
1976static __be32 send_ieth(struct ib_send_wr *wr)
1977{
1978 switch (wr->opcode) {
1979 case IB_WR_SEND_WITH_IMM:
1980 case IB_WR_RDMA_WRITE_WITH_IMM:
1981 return wr->ex.imm_data;
1982
1983 case IB_WR_SEND_WITH_INV:
1984 return cpu_to_be32(wr->ex.invalidate_rkey);
1985
1986 default:
1987 return 0;
1988 }
1989}
1990
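/*
 * Editor's note: calc_sig() XORs the bytes of a WQE and returns the
 * complement; wq_sig() below takes the number of bytes to cover from the
 * low six bits of byte 8 of the WQE, scaled to 16-byte units.
 */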
1991static u8 calc_sig(void *wqe, int size)
1992{
1993 u8 *p = wqe;
1994 u8 res = 0;
1995 int i;
1996
1997 for (i = 0; i < size; i++)
1998 res ^= p[i];
1999
2000 return ~res;
2001}
2002
2003static u8 wq_sig(void *wqe)
2004{
2005 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
2006}
2007
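/*
 * Editor's note: copy the scatter list inline into the send queue,
 * wrapping at the end of the SQ buffer and failing with -ENOMEM past
 * max_inline_data.  The reported size is in 16-byte units; e.g. 100
 * inline bytes plus the 4-byte byte_count header round up to 112 bytes,
 * so *sz = 7.
 */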
2008static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
2009 void *wqe, int *sz)
2010{
2011 struct mlx5_wqe_inline_seg *seg;
2012 void *qend = qp->sq.qend;
2013 void *addr;
2014 int inl = 0;
2015 int copy;
2016 int len;
2017 int i;
2018
2019 seg = wqe;
2020 wqe += sizeof(*seg);
2021 for (i = 0; i < wr->num_sge; i++) {
2022 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
2023 len = wr->sg_list[i].length;
2024 inl += len;
2025
2026 if (unlikely(inl > qp->max_inline_data))
2027 return -ENOMEM;
2028
2029 if (unlikely(wqe + len > qend)) {
2030 copy = qend - wqe;
2031 memcpy(wqe, addr, copy);
2032 addr += copy;
2033 len -= copy;
2034 wqe = mlx5_get_send_wqe(qp, 0);
2035 }
2036 memcpy(wqe, addr, len);
2037 wqe += len;
2038 }
2039
2040 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2041
2042 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2043
2044 return 0;
2045}
2046
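/*
 * Editor's note: T10-DIF helpers.  prot_field_size() returns the size of
 * the protection information per block, and bs_selector() encodes the
 * supported block sizes (512/520/4096/4160 bytes and 1GB); unsupported
 * sizes map to 0.
 */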
e6631814
SG
2047static u16 prot_field_size(enum ib_signature_type type)
2048{
2049 switch (type) {
2050 case IB_SIG_TYPE_T10_DIF:
2051 return MLX5_DIF_SIZE;
2052 default:
2053 return 0;
2054 }
2055}
2056
2057static u8 bs_selector(int block_size)
2058{
2059 switch (block_size) {
2060 case 512: return 0x1;
2061 case 520: return 0x2;
2062 case 4096: return 0x3;
2063 case 4160: return 0x4;
2064 case 1073741824: return 0x5;
2065 default: return 0;
2066 }
2067}
2068
78eda2bb
SG
2069static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
2070 struct mlx5_bsf_inl *inl)
e6631814 2071{
142537f4
SG
2072 /* Valid inline section and allow BSF refresh */
2073 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
2074 MLX5_BSF_REFRESH_DIF);
2075 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
2076 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
78eda2bb
SG
2077 /* repeating block */
2078 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
2079 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
2080 MLX5_DIF_CRC : MLX5_DIF_IPCS;
e6631814 2081
78eda2bb
SG
2082 if (domain->sig.dif.ref_remap)
2083 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
e6631814 2084
78eda2bb
SG
2085 if (domain->sig.dif.app_escape) {
2086 if (domain->sig.dif.ref_escape)
2087 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
2088 else
2089 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
e6631814
SG
2090 }
2091
78eda2bb
SG
2092 inl->dif_app_bitmask_check =
2093 cpu_to_be16(domain->sig.dif.apptag_check_mask);
e6631814
SG
2094}
2095
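/*
 * Editor's note: program the Block Signature Format (BSF) for a signature
 * handover, one section per domain (memory and wire).  When both domains
 * share the same block structure, the wire domain copies fields from the
 * memory domain, and the copy byte mask selects whichever of the guard,
 * application and reference tags are identical on both sides.
 */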
2096static int mlx5_set_bsf(struct ib_mr *sig_mr,
2097 struct ib_sig_attrs *sig_attrs,
2098 struct mlx5_bsf *bsf, u32 data_size)
2099{
2100 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2101 struct mlx5_bsf_basic *basic = &bsf->basic;
2102 struct ib_sig_domain *mem = &sig_attrs->mem;
2103 struct ib_sig_domain *wire = &sig_attrs->wire;
e6631814 2104
c7f44fbd 2105 memset(bsf, 0, sizeof(*bsf));
78eda2bb
SG
2106
2107 /* Basic + Extended + Inline */
2108 basic->bsf_size_sbs = 1 << 7;
2109 /* Input domain check byte mask */
2110 basic->check_byte_mask = sig_attrs->check_mask;
2111 basic->raw_data_size = cpu_to_be32(data_size);
2112
2113 /* Memory domain */
e6631814 2114 switch (sig_attrs->mem.sig_type) {
78eda2bb
SG
2115 case IB_SIG_TYPE_NONE:
2116 break;
e6631814 2117 case IB_SIG_TYPE_T10_DIF:
78eda2bb
SG
2118 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2119 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
2120 mlx5_fill_inl_bsf(mem, &bsf->m_inl);
2121 break;
2122 default:
2123 return -EINVAL;
2124 }
e6631814 2125
78eda2bb
SG
2126 /* Wire domain */
2127 switch (sig_attrs->wire.sig_type) {
2128 case IB_SIG_TYPE_NONE:
2129 break;
2130 case IB_SIG_TYPE_T10_DIF:
e6631814 2131 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
78eda2bb 2132 mem->sig_type == wire->sig_type) {
e6631814 2133 /* Same block structure */
142537f4 2134 basic->bsf_size_sbs |= 1 << 4;
e6631814 2135 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
fd22f78c 2136 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
c7f44fbd 2137 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
fd22f78c 2138 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
c7f44fbd 2139 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
fd22f78c 2140 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
e6631814
SG
2141 } else
2142 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2143
142537f4 2144 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
78eda2bb 2145 mlx5_fill_inl_bsf(wire, &bsf->w_inl);
e6631814 2146 break;
e6631814
SG
2147 default:
2148 return -EINVAL;
2149 }
2150
2151 return 0;
2152}
2153
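/*
 * Editor's note: build the data part of a signature handover WQE.  When
 * there is no separate protection buffer (or data and protection are
 * interleaved) a single KLM plus a BSF is enough; otherwise a strided
 * block control segment interleaves data and protection entries.  As an
 * illustration, 4096 data bytes with a 512-byte pi_interval give
 * repeat_count = 8, each cycle carrying 512 data bytes plus
 * MLX5_DIF_SIZE protection bytes.
 */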
2154static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2155 void **seg, int *size)
2156{
2157 struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
2158 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2159 struct mlx5_bsf *bsf;
2160 u32 data_len = wr->sg_list->length;
2161 u32 data_key = wr->sg_list->lkey;
2162 u64 data_va = wr->sg_list->addr;
2163 int ret;
2164 int wqe_size;
2165
5c273b16
SG
2166 if (!wr->wr.sig_handover.prot ||
2167 (data_key == wr->wr.sig_handover.prot->lkey &&
2168 data_va == wr->wr.sig_handover.prot->addr &&
2169 data_len == wr->wr.sig_handover.prot->length)) {
e6631814
SG
2170 /**
2171 * Source domain doesn't contain signature information
5c273b16 2172 * or data and protection are interleaved in memory.
e6631814
SG
 2173 * so we need to construct:
2174 * ------------------
2175 * | data_klm |
2176 * ------------------
2177 * | BSF |
2178 * ------------------
2179 **/
2180 struct mlx5_klm *data_klm = *seg;
2181
2182 data_klm->bcount = cpu_to_be32(data_len);
2183 data_klm->key = cpu_to_be32(data_key);
2184 data_klm->va = cpu_to_be64(data_va);
2185 wqe_size = ALIGN(sizeof(*data_klm), 64);
2186 } else {
2187 /**
2188 * Source domain contains signature information
 2189 * so we need to construct a strided block format:
2190 * ---------------------------
2191 * | stride_block_ctrl |
2192 * ---------------------------
2193 * | data_klm |
2194 * ---------------------------
2195 * | prot_klm |
2196 * ---------------------------
2197 * | BSF |
2198 * ---------------------------
2199 **/
2200 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2201 struct mlx5_stride_block_entry *data_sentry;
2202 struct mlx5_stride_block_entry *prot_sentry;
2203 u32 prot_key = wr->wr.sig_handover.prot->lkey;
2204 u64 prot_va = wr->wr.sig_handover.prot->addr;
2205 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2206 int prot_size;
2207
2208 sblock_ctrl = *seg;
2209 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2210 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2211
2212 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2213 if (!prot_size) {
2214 pr_err("Bad block size given: %u\n", block_size);
2215 return -EINVAL;
2216 }
2217 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2218 prot_size);
2219 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2220 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2221 sblock_ctrl->num_entries = cpu_to_be16(2);
2222
2223 data_sentry->bcount = cpu_to_be16(block_size);
2224 data_sentry->key = cpu_to_be32(data_key);
2225 data_sentry->va = cpu_to_be64(data_va);
5c273b16
SG
2226 data_sentry->stride = cpu_to_be16(block_size);
2227
e6631814
SG
2228 prot_sentry->bcount = cpu_to_be16(prot_size);
2229 prot_sentry->key = cpu_to_be32(prot_key);
5c273b16
SG
2230 prot_sentry->va = cpu_to_be64(prot_va);
2231 prot_sentry->stride = cpu_to_be16(prot_size);
e6631814 2232
e6631814
SG
2233 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2234 sizeof(*prot_sentry), 64);
2235 }
2236
2237 *seg += wqe_size;
2238 *size += wqe_size / 16;
2239 if (unlikely((*seg == qp->sq.qend)))
2240 *seg = mlx5_get_send_wqe(qp, 0);
2241
2242 bsf = *seg;
2243 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2244 if (ret)
2245 return -EINVAL;
2246
2247 *seg += sizeof(*bsf);
2248 *size += sizeof(*bsf) / 16;
2249 if (unlikely((*seg == qp->sq.qend)))
2250 *seg = mlx5_get_send_wqe(qp, 0);
2251
2252 return 0;
2253}
2254
2255static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2256 struct ib_send_wr *wr, u32 nelements,
2257 u32 length, u32 pdn)
2258{
2259 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2260 u32 sig_key = sig_mr->rkey;
d5436ba0 2261 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
e6631814
SG
2262
2263 memset(seg, 0, sizeof(*seg));
2264
2265 seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
2266 MLX5_ACCESS_MODE_KLM;
2267 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
d5436ba0 2268 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
e6631814
SG
2269 MLX5_MKEY_BSF_EN | pdn);
2270 seg->len = cpu_to_be64(length);
2271 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2272 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2273}
2274
2275static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2276 struct ib_send_wr *wr, u32 nelements)
2277{
2278 memset(umr, 0, sizeof(*umr));
2279
2280 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2281 umr->klm_octowords = get_klm_octo(nelements);
2282 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2283 umr->mkey_mask = sig_mkey_mask();
2284}
2285
2286
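/*
 * Editor's note: post the UMR part of an IB_WR_REG_SIG_MR work request.
 * The request is validated (single SGE, no remote atomics,
 * signature-enabled QP, previous signature status consumed), then the UMR
 * control segment, the signature mkey segment and the data/BSF segments
 * built above are emitted.
 */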
2287static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2288 void **seg, int *size)
2289{
2290 struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
2291 u32 pdn = get_pd(qp)->pdn;
2292 u32 klm_oct_size;
2293 int region_len, ret;
2294
2295 if (unlikely(wr->num_sge != 1) ||
2296 unlikely(wr->wr.sig_handover.access_flags &
2297 IB_ACCESS_REMOTE_ATOMIC) ||
d5436ba0
SG
2298 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2299 unlikely(!sig_mr->sig->sig_status_checked))
e6631814
SG
2300 return -EINVAL;
2301
2302 /* length of the protected region, data + protection */
2303 region_len = wr->sg_list->length;
8524867b
SG
2304 if (wr->wr.sig_handover.prot &&
2305 (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
2306 wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
2307 wr->wr.sig_handover.prot->length != wr->sg_list->length))
e6631814
SG
2308 region_len += wr->wr.sig_handover.prot->length;
2309
2310 /**
2311 * KLM octoword size - if protection was provided
2312 * then we use strided block format (3 octowords),
2313 * else we use single KLM (1 octoword)
2314 **/
2315 klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
2316
2317 set_sig_umr_segment(*seg, wr, klm_oct_size);
2318 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2319 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2320 if (unlikely((*seg == qp->sq.qend)))
2321 *seg = mlx5_get_send_wqe(qp, 0);
2322
2323 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2324 *seg += sizeof(struct mlx5_mkey_seg);
2325 *size += sizeof(struct mlx5_mkey_seg) / 16;
2326 if (unlikely((*seg == qp->sq.qend)))
2327 *seg = mlx5_get_send_wqe(qp, 0);
2328
2329 ret = set_sig_data_segment(wr, qp, seg, size);
2330 if (ret)
2331 return ret;
2332
d5436ba0 2333 sig_mr->sig->sig_status_checked = false;
e6631814
SG
2334 return 0;
2335}
2336
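/*
 * Editor's note: emit a SET_PSV segment for one signature domain, seeding
 * the transient signature (block guard and application tag) and the
 * reference tag for T10-DIF; for IB_SIG_TYPE_NONE only the PSV index is
 * written.
 */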
2337static int set_psv_wr(struct ib_sig_domain *domain,
2338 u32 psv_idx, void **seg, int *size)
2339{
2340 struct mlx5_seg_set_psv *psv_seg = *seg;
2341
2342 memset(psv_seg, 0, sizeof(*psv_seg));
2343 psv_seg->psv_num = cpu_to_be32(psv_idx);
2344 switch (domain->sig_type) {
78eda2bb
SG
2345 case IB_SIG_TYPE_NONE:
2346 break;
e6631814
SG
2347 case IB_SIG_TYPE_T10_DIF:
2348 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2349 domain->sig.dif.app_tag);
2350 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
e6631814 2351 break;
e6631814
SG
2352 default:
2353 pr_err("Bad signature type given.\n");
2354 return 1;
2355 }
2356
78eda2bb
SG
2357 *seg += sizeof(*psv_seg);
2358 *size += sizeof(*psv_seg) / 16;
2359
e6631814
SG
2360 return 0;
2361}
2362
e126ba97
EC
2363static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
2364 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
2365{
2366 int writ = 0;
2367 int li;
2368
2369 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
2370 if (unlikely(wr->send_flags & IB_SEND_INLINE))
2371 return -EINVAL;
2372
2373 set_frwr_umr_segment(*seg, wr, li);
2374 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2375 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2376 if (unlikely((*seg == qp->sq.qend)))
2377 *seg = mlx5_get_send_wqe(qp, 0);
2378 set_mkey_segment(*seg, wr, li, &writ);
2379 *seg += sizeof(struct mlx5_mkey_seg);
2380 *size += sizeof(struct mlx5_mkey_seg) / 16;
2381 if (unlikely((*seg == qp->sq.qend)))
2382 *seg = mlx5_get_send_wqe(qp, 0);
2383 if (!li) {
9641b74e
EC
2384 if (unlikely(wr->wr.fast_reg.page_list_len >
2385 wr->wr.fast_reg.page_list->max_page_list_len))
2386 return -ENOMEM;
2387
e126ba97
EC
2388 set_frwr_pages(*seg, wr, mdev, pd, writ);
2389 *seg += sizeof(struct mlx5_wqe_data_seg);
2390 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
2391 }
2392 return 0;
2393}
2394
2395static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2396{
2397 __be32 *p = NULL;
2398 int tidx = idx;
2399 int i, j;
2400
2401 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2402 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2403 if ((i & 0xf) == 0) {
2404 void *buf = mlx5_get_send_wqe(qp, tidx);
2405 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2406 p = buf;
2407 j = 0;
2408 }
2409 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2410 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2411 be32_to_cpu(p[j + 3]));
2412 }
2413}
2414
2415static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2416 unsigned bytecnt, struct mlx5_ib_qp *qp)
2417{
2418 while (bytecnt > 0) {
2419 __iowrite64_copy(dst++, src++, 8);
2420 __iowrite64_copy(dst++, src++, 8);
2421 __iowrite64_copy(dst++, src++, 8);
2422 __iowrite64_copy(dst++, src++, 8);
2423 __iowrite64_copy(dst++, src++, 8);
2424 __iowrite64_copy(dst++, src++, 8);
2425 __iowrite64_copy(dst++, src++, 8);
2426 __iowrite64_copy(dst++, src++, 8);
2427 bytecnt -= 64;
2428 if (unlikely(src == qp->sq.qend))
2429 src = mlx5_get_send_wqe(qp, 0);
2430 }
2431}
2432
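/*
 * Editor's note: choose the fence mode for a work request.  A fenced
 * local invalidate uses strong ordering; when a fence is already cached,
 * an IB_SEND_FENCE request gets small-and-fence and other requests
 * inherit the cached value; with nothing cached the result is 0.
 */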
2433static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2434{
2435 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2436 wr->send_flags & IB_SEND_FENCE))
2437 return MLX5_FENCE_MODE_STRONG_ORDERING;
2438
2439 if (unlikely(fence)) {
2440 if (wr->send_flags & IB_SEND_FENCE)
2441 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
2442 else
2443 return fence;
2444
2445 } else {
2446 return 0;
2447 }
2448}
2449
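/*
 * Editor's note: reserve the next send WQE slot.  Check for SQ overflow,
 * compute the slot index from cur_post, and initialise the control
 * segment with the immediate data and the completion/solicited bits
 * requested by the WR.
 */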
6e5eadac
SG
2450static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2451 struct mlx5_wqe_ctrl_seg **ctrl,
6a4f139a 2452 struct ib_send_wr *wr, unsigned *idx,
6e5eadac
SG
2453 int *size, int nreq)
2454{
2455 int err = 0;
2456
2457 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2458 err = -ENOMEM;
2459 return err;
2460 }
2461
2462 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2463 *seg = mlx5_get_send_wqe(qp, *idx);
2464 *ctrl = *seg;
2465 *(uint32_t *)(*seg + 8) = 0;
2466 (*ctrl)->imm = send_ieth(wr);
2467 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2468 (wr->send_flags & IB_SEND_SIGNALED ?
2469 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2470 (wr->send_flags & IB_SEND_SOLICITED ?
2471 MLX5_WQE_CTRL_SOLICITED : 0);
2472
2473 *seg += sizeof(**ctrl);
2474 *size = sizeof(**ctrl) / 16;
2475
2476 return err;
2477}
2478
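/*
 * Editor's note: finalise a WQE.  Write the opcode/index and qpn_ds
 * words, apply the fence bits, optionally compute the WQE signature,
 * record the bookkeeping used by completion processing, and advance
 * cur_post in basic-block units (a 64-byte WQE, size 4, advances
 * cur_post by one MLX5_SEND_WQE_BB).
 */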
2479static void finish_wqe(struct mlx5_ib_qp *qp,
2480 struct mlx5_wqe_ctrl_seg *ctrl,
2481 u8 size, unsigned idx, u64 wr_id,
2482 int nreq, u8 fence, u8 next_fence,
2483 u32 mlx5_opcode)
2484{
2485 u8 opmod = 0;
2486
2487 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2488 mlx5_opcode | ((u32)opmod << 24));
2489 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2490 ctrl->fm_ce_se |= fence;
2491 qp->fm_cache = next_fence;
2492 if (unlikely(qp->wq_sig))
2493 ctrl->signature = wq_sig(ctrl);
2494
2495 qp->sq.wrid[idx] = wr_id;
2496 qp->sq.w_list[idx].opcode = mlx5_opcode;
2497 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2498 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2499 qp->sq.w_list[idx].next = qp->sq.cur_post;
2500}
2501
2502
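/*
 * Editor's note: post a chain of send work requests.  For each WR a
 * control segment is begun, transport- and opcode-specific segments
 * (RDMA, UMR, datagram, signature) are appended, data is added either
 * inline or as pointer segments, and the WQE is finished.  The doorbell
 * is rung once for the whole chain after a write barrier.
 */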
e126ba97
EC
2503int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2504 struct ib_send_wr **bad_wr)
2505{
2506 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2507 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
9603b61d 2508 struct mlx5_core_dev *mdev = dev->mdev;
e126ba97 2509 struct mlx5_ib_qp *qp = to_mqp(ibqp);
e6631814 2510 struct mlx5_ib_mr *mr;
e126ba97
EC
2511 struct mlx5_wqe_data_seg *dpseg;
2512 struct mlx5_wqe_xrc_seg *xrc;
2513 struct mlx5_bf *bf = qp->bf;
2514 int uninitialized_var(size);
2515 void *qend = qp->sq.qend;
2516 unsigned long flags;
e126ba97
EC
2517 unsigned idx;
2518 int err = 0;
2519 int inl = 0;
2520 int num_sge;
2521 void *seg;
2522 int nreq;
2523 int i;
2524 u8 next_fence = 0;
e126ba97
EC
2525 u8 fence;
2526
2527 spin_lock_irqsave(&qp->sq.lock, flags);
2528
2529 for (nreq = 0; wr; nreq++, wr = wr->next) {
a8f731eb 2530 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
e126ba97
EC
2531 mlx5_ib_warn(dev, "\n");
2532 err = -EINVAL;
2533 *bad_wr = wr;
2534 goto out;
2535 }
2536
6e5eadac
SG
2537 fence = qp->fm_cache;
2538 num_sge = wr->num_sge;
2539 if (unlikely(num_sge > qp->sq.max_gs)) {
e126ba97
EC
2540 mlx5_ib_warn(dev, "\n");
2541 err = -ENOMEM;
2542 *bad_wr = wr;
2543 goto out;
2544 }
2545
6e5eadac
SG
2546 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2547 if (err) {
e126ba97
EC
2548 mlx5_ib_warn(dev, "\n");
2549 err = -ENOMEM;
2550 *bad_wr = wr;
2551 goto out;
2552 }
2553
e126ba97
EC
2554 switch (ibqp->qp_type) {
2555 case IB_QPT_XRC_INI:
2556 xrc = seg;
2557 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2558 seg += sizeof(*xrc);
2559 size += sizeof(*xrc) / 16;
2560 /* fall through */
2561 case IB_QPT_RC:
2562 switch (wr->opcode) {
2563 case IB_WR_RDMA_READ:
2564 case IB_WR_RDMA_WRITE:
2565 case IB_WR_RDMA_WRITE_WITH_IMM:
2566 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2567 wr->wr.rdma.rkey);
f241e749 2568 seg += sizeof(struct mlx5_wqe_raddr_seg);
e126ba97
EC
2569 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2570 break;
2571
2572 case IB_WR_ATOMIC_CMP_AND_SWP:
2573 case IB_WR_ATOMIC_FETCH_AND_ADD:
e126ba97 2574 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
81bea28f
EC
2575 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2576 err = -ENOSYS;
2577 *bad_wr = wr;
2578 goto out;
e126ba97
EC
2579
2580 case IB_WR_LOCAL_INV:
2581 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2582 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2583 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2584 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2585 if (err) {
2586 mlx5_ib_warn(dev, "\n");
2587 *bad_wr = wr;
2588 goto out;
2589 }
2590 num_sge = 0;
2591 break;
2592
2593 case IB_WR_FAST_REG_MR:
2594 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2595 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2596 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2597 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2598 if (err) {
2599 mlx5_ib_warn(dev, "\n");
2600 *bad_wr = wr;
2601 goto out;
2602 }
2603 num_sge = 0;
2604 break;
2605
e6631814
SG
2606 case IB_WR_REG_SIG_MR:
2607 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2608 mr = to_mmr(wr->wr.sig_handover.sig_mr);
2609
2610 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2611 err = set_sig_umr_wr(wr, qp, &seg, &size);
2612 if (err) {
2613 mlx5_ib_warn(dev, "\n");
2614 *bad_wr = wr;
2615 goto out;
2616 }
2617
2618 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2619 nreq, get_fence(fence, wr),
2620 next_fence, MLX5_OPCODE_UMR);
2621 /*
 2622 * SET_PSV WQEs are not signaled and are posted as
 2623 * solicited so that errors are reported
2624 */
2625 wr->send_flags &= ~IB_SEND_SIGNALED;
2626 wr->send_flags |= IB_SEND_SOLICITED;
2627 err = begin_wqe(qp, &seg, &ctrl, wr,
2628 &idx, &size, nreq);
2629 if (err) {
2630 mlx5_ib_warn(dev, "\n");
2631 err = -ENOMEM;
2632 *bad_wr = wr;
2633 goto out;
2634 }
2635
2636 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
2637 mr->sig->psv_memory.psv_idx, &seg,
2638 &size);
2639 if (err) {
2640 mlx5_ib_warn(dev, "\n");
2641 *bad_wr = wr;
2642 goto out;
2643 }
2644
2645 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2646 nreq, get_fence(fence, wr),
2647 next_fence, MLX5_OPCODE_SET_PSV);
2648 err = begin_wqe(qp, &seg, &ctrl, wr,
2649 &idx, &size, nreq);
2650 if (err) {
2651 mlx5_ib_warn(dev, "\n");
2652 err = -ENOMEM;
2653 *bad_wr = wr;
2654 goto out;
2655 }
2656
2657 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2658 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
2659 mr->sig->psv_wire.psv_idx, &seg,
2660 &size);
2661 if (err) {
2662 mlx5_ib_warn(dev, "\n");
2663 *bad_wr = wr;
2664 goto out;
2665 }
2666
2667 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2668 nreq, get_fence(fence, wr),
2669 next_fence, MLX5_OPCODE_SET_PSV);
2670 num_sge = 0;
2671 goto skip_psv;
2672
e126ba97
EC
2673 default:
2674 break;
2675 }
2676 break;
2677
2678 case IB_QPT_UC:
2679 switch (wr->opcode) {
2680 case IB_WR_RDMA_WRITE:
2681 case IB_WR_RDMA_WRITE_WITH_IMM:
2682 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2683 wr->wr.rdma.rkey);
2684 seg += sizeof(struct mlx5_wqe_raddr_seg);
2685 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2686 break;
2687
2688 default:
2689 break;
2690 }
2691 break;
2692
2693 case IB_QPT_UD:
2694 case IB_QPT_SMI:
2695 case IB_QPT_GSI:
2696 set_datagram_seg(seg, wr);
f241e749 2697 seg += sizeof(struct mlx5_wqe_datagram_seg);
e126ba97
EC
2698 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2699 if (unlikely((seg == qend)))
2700 seg = mlx5_get_send_wqe(qp, 0);
2701 break;
2702
2703 case MLX5_IB_QPT_REG_UMR:
2704 if (wr->opcode != MLX5_IB_WR_UMR) {
2705 err = -EINVAL;
2706 mlx5_ib_warn(dev, "bad opcode\n");
2707 goto out;
2708 }
2709 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2710 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2711 set_reg_umr_segment(seg, wr);
2712 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2713 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2714 if (unlikely((seg == qend)))
2715 seg = mlx5_get_send_wqe(qp, 0);
2716 set_reg_mkey_segment(seg, wr);
2717 seg += sizeof(struct mlx5_mkey_seg);
2718 size += sizeof(struct mlx5_mkey_seg) / 16;
2719 if (unlikely((seg == qend)))
2720 seg = mlx5_get_send_wqe(qp, 0);
2721 break;
2722
2723 default:
2724 break;
2725 }
2726
2727 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2728 int uninitialized_var(sz);
2729
2730 err = set_data_inl_seg(qp, wr, seg, &sz);
2731 if (unlikely(err)) {
2732 mlx5_ib_warn(dev, "\n");
2733 *bad_wr = wr;
2734 goto out;
2735 }
2736 inl = 1;
2737 size += sz;
2738 } else {
2739 dpseg = seg;
2740 for (i = 0; i < num_sge; i++) {
2741 if (unlikely(dpseg == qend)) {
2742 seg = mlx5_get_send_wqe(qp, 0);
2743 dpseg = seg;
2744 }
2745 if (likely(wr->sg_list[i].length)) {
2746 set_data_ptr_seg(dpseg, wr->sg_list + i);
2747 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2748 dpseg++;
2749 }
2750 }
2751 }
2752
6e5eadac
SG
2753 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2754 get_fence(fence, wr), next_fence,
2755 mlx5_ib_opcode[wr->opcode]);
e6631814 2756skip_psv:
e126ba97
EC
2757 if (0)
2758 dump_wqe(qp, idx, size);
2759 }
2760
2761out:
2762 if (likely(nreq)) {
2763 qp->sq.head += nreq;
2764
2765 /* Make sure that descriptors are written before
2766 * updating doorbell record and ringing the doorbell
2767 */
2768 wmb();
2769
2770 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2771
ada388f7
EC
2772 /* Make sure doorbell record is visible to the HCA before
 2773 * we ring the doorbell */
2774 wmb();
2775
e126ba97
EC
2776 if (bf->need_lock)
2777 spin_lock(&bf->lock);
6a4f139a
EC
2778 else
2779 __acquire(&bf->lock);
e126ba97
EC
2780
2781 /* TBD enable WC */
2782 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2783 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2784 /* wc_wmb(); */
2785 } else {
2786 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2787 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2788 /* Make sure doorbells don't leak out of SQ spinlock
2789 * and reach the HCA out of order.
2790 */
2791 mmiowb();
2792 }
2793 bf->offset ^= bf->buf_size;
2794 if (bf->need_lock)
2795 spin_unlock(&bf->lock);
6a4f139a
EC
2796 else
2797 __release(&bf->lock);
e126ba97
EC
2798 }
2799
2800 spin_unlock_irqrestore(&qp->sq.lock, flags);
2801
2802 return err;
2803}
2804
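/*
 * Editor's note: receive path.  Each receive WQE gets one data segment
 * per SGE, an invalid-lkey terminator when fewer than max_gs entries are
 * used, and an optional signature segment when wq_sig is enabled.  The
 * doorbell record is updated once after a write barrier.
 */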
2805static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2806{
2807 sig->signature = calc_sig(sig, size);
2808}
2809
2810int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2811 struct ib_recv_wr **bad_wr)
2812{
2813 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2814 struct mlx5_wqe_data_seg *scat;
2815 struct mlx5_rwqe_sig *sig;
2816 unsigned long flags;
2817 int err = 0;
2818 int nreq;
2819 int ind;
2820 int i;
2821
2822 spin_lock_irqsave(&qp->rq.lock, flags);
2823
2824 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2825
2826 for (nreq = 0; wr; nreq++, wr = wr->next) {
2827 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2828 err = -ENOMEM;
2829 *bad_wr = wr;
2830 goto out;
2831 }
2832
2833 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2834 err = -EINVAL;
2835 *bad_wr = wr;
2836 goto out;
2837 }
2838
2839 scat = get_recv_wqe(qp, ind);
2840 if (qp->wq_sig)
2841 scat++;
2842
2843 for (i = 0; i < wr->num_sge; i++)
2844 set_data_ptr_seg(scat + i, wr->sg_list + i);
2845
2846 if (i < qp->rq.max_gs) {
2847 scat[i].byte_count = 0;
2848 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2849 scat[i].addr = 0;
2850 }
2851
2852 if (qp->wq_sig) {
2853 sig = (struct mlx5_rwqe_sig *)scat;
2854 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2855 }
2856
2857 qp->rq.wrid[ind] = wr->wr_id;
2858
2859 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2860 }
2861
2862out:
2863 if (likely(nreq)) {
2864 qp->rq.head += nreq;
2865
2866 /* Make sure that descriptors are written before
2867 * doorbell record.
2868 */
2869 wmb();
2870
2871 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2872 }
2873
2874 spin_unlock_irqrestore(&qp->rq.lock, flags);
2875
2876 return err;
2877}
2878
2879static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2880{
2881 switch (mlx5_state) {
2882 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2883 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2884 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2885 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2886 case MLX5_QP_STATE_SQ_DRAINING:
2887 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2888 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2889 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
2890 default: return -1;
2891 }
2892}
2893
2894static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2895{
2896 switch (mlx5_mig_state) {
2897 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2898 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2899 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
2900 default: return -1;
2901 }
2902}
2903
2904static int to_ib_qp_access_flags(int mlx5_flags)
2905{
2906 int ib_flags = 0;
2907
2908 if (mlx5_flags & MLX5_QP_BIT_RRE)
2909 ib_flags |= IB_ACCESS_REMOTE_READ;
2910 if (mlx5_flags & MLX5_QP_BIT_RWE)
2911 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2912 if (mlx5_flags & MLX5_QP_BIT_RAE)
2913 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2914
2915 return ib_flags;
2916}
2917
2918static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2919 struct mlx5_qp_path *path)
2920{
9603b61d 2921 struct mlx5_core_dev *dev = ibdev->mdev;
e126ba97
EC
2922
2923 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2924 ib_ah_attr->port_num = path->port;
2925
c7a08ac7
EC
2926 if (ib_ah_attr->port_num == 0 ||
2927 ib_ah_attr->port_num > dev->caps.gen.num_ports)
e126ba97
EC
2928 return;
2929
2930 ib_ah_attr->sl = path->sl & 0xf;
2931
2932 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
2933 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
2934 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
2935 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
2936 if (ib_ah_attr->ah_flags) {
2937 ib_ah_attr->grh.sgid_index = path->mgid_index;
2938 ib_ah_attr->grh.hop_limit = path->hop_limit;
2939 ib_ah_attr->grh.traffic_class =
2940 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
2941 ib_ah_attr->grh.flow_label =
2942 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
2943 memcpy(ib_ah_attr->grh.dgid.raw,
2944 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
2945 }
2946}
2947
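/*
 * Editor's note: query the firmware QP context and decode it into
 * ib_qp_attr fields (state, MTU, PSNs, access flags, path and alternate
 * path attributes, timers).  Software-owned capabilities such as
 * max_send_wr are reported only for kernel QPs; inline data size is not
 * reported at all.
 */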
2948int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
2949 struct ib_qp_init_attr *qp_init_attr)
2950{
2951 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2952 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2953 struct mlx5_query_qp_mbox_out *outb;
2954 struct mlx5_qp_context *context;
2955 int mlx5_state;
2956 int err = 0;
2957
2958 mutex_lock(&qp->mutex);
2959 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
2960 if (!outb) {
2961 err = -ENOMEM;
2962 goto out;
2963 }
2964 context = &outb->ctx;
9603b61d 2965 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
e126ba97
EC
2966 if (err)
2967 goto out_free;
2968
2969 mlx5_state = be32_to_cpu(context->flags) >> 28;
2970
2971 qp->state = to_ib_qp_state(mlx5_state);
2972 qp_attr->qp_state = qp->state;
2973 qp_attr->path_mtu = context->mtu_msgmax >> 5;
2974 qp_attr->path_mig_state =
2975 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
2976 qp_attr->qkey = be32_to_cpu(context->qkey);
2977 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
2978 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
2979 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
2980 qp_attr->qp_access_flags =
2981 to_ib_qp_access_flags(be32_to_cpu(context->params2));
2982
2983 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
2984 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
2985 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
2986 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
2987 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
2988 }
2989
2990 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
2991 qp_attr->port_num = context->pri_path.port;
2992
2993 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2994 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
2995
2996 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
2997
2998 qp_attr->max_dest_rd_atomic =
2999 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
3000 qp_attr->min_rnr_timer =
3001 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
3002 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
3003 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
3004 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
3005 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
3006 qp_attr->cur_qp_state = qp_attr->qp_state;
3007 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
3008 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
3009
3010 if (!ibqp->uobject) {
3011 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
3012 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3013 } else {
3014 qp_attr->cap.max_send_wr = 0;
3015 qp_attr->cap.max_send_sge = 0;
3016 }
3017
3018 /* We don't support inline sends for kernel QPs (yet), and we
3019 * don't know what userspace's value should be.
3020 */
3021 qp_attr->cap.max_inline_data = 0;
3022
3023 qp_init_attr->cap = qp_attr->cap;
3024
3025 qp_init_attr->create_flags = 0;
3026 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3027 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3028
3029 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3030 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3031
3032out_free:
3033 kfree(outb);
3034
3035out:
3036 mutex_unlock(&qp->mutex);
3037 return err;
3038}
3039
3040struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3041 struct ib_ucontext *context,
3042 struct ib_udata *udata)
3043{
3044 struct mlx5_ib_dev *dev = to_mdev(ibdev);
c7a08ac7 3045 struct mlx5_general_caps *gen;
e126ba97
EC
3046 struct mlx5_ib_xrcd *xrcd;
3047 int err;
3048
c7a08ac7
EC
3049 gen = &dev->mdev->caps.gen;
3050 if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
e126ba97
EC
3051 return ERR_PTR(-ENOSYS);
3052
3053 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3054 if (!xrcd)
3055 return ERR_PTR(-ENOMEM);
3056
9603b61d 3057 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
e126ba97
EC
3058 if (err) {
3059 kfree(xrcd);
3060 return ERR_PTR(-ENOMEM);
3061 }
3062
3063 return &xrcd->ibxrcd;
3064}
3065
3066int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3067{
3068 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3069 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3070 int err;
3071
9603b61d 3072 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
e126ba97
EC
3073 if (err) {
3074 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3075 return err;
3076 }
3077
3078 kfree(xrcd);
3079
3080 return 0;
3081}