1 /*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/kernel.h>
37 #include <net/addrconf.h>
38 #include <rdma/ib_umem.h>
39
40 #include "hnae3.h"
41 #include "hns_roce_common.h"
42 #include "hns_roce_device.h"
43 #include "hns_roce_cmd.h"
44 #include "hns_roce_hem.h"
45 #include "hns_roce_hw_v2.h"
46
47 static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
48 struct ib_sge *sg)
49 {
50 dseg->lkey = cpu_to_le32(sg->lkey);
51 dseg->addr = cpu_to_le64(sg->addr);
52 dseg->len = cpu_to_le32(sg->length);
53 }
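
/*
 * A minimal illustrative sketch (not driver code): translating a caller's
 * ib_sge array into consecutive hardware data segments with the helper
 * above.  The function name "example_fill_sges" is hypothetical.
 */
static inline void example_fill_sges(struct hns_roce_v2_wqe_data_seg *dseg,
				     struct ib_sge *sg_list, int num_sge)
{
	int i;

	/* Each ib_sge maps 1:1 onto one little-endian hardware segment */
	for (i = 0; i < num_sge; i++)
		set_data_seg_v2(dseg + i, sg_list + i);
}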
54
55 static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
56 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
57 void *wqe, unsigned int *sge_ind,
58 struct ib_send_wr **bad_wr)
59 {
60 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
61 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
62 struct hns_roce_qp *qp = to_hr_qp(ibqp);
63 int i;
64
65 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
66 if (le32_to_cpu(rc_sq_wqe->msg_len) >
67 hr_dev->caps.max_sq_inline) {
68 *bad_wr = wr;
70 dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal\n",
71 hr_dev->caps.max_sq_inline, le32_to_cpu(rc_sq_wqe->msg_len));
71 return -EINVAL;
72 }
73
74 if (wr->opcode == IB_WR_RDMA_READ) {
75 *bad_wr = wr;
75 dev_err(hr_dev->dev, "inline data is not supported for RDMA READ!\n");
76 return -EINVAL;
77 }
78
79 for (i = 0; i < wr->num_sge; i++) {
80 memcpy(wqe, ((void *)wr->sg_list[i].addr),
81 wr->sg_list[i].length);
82 wqe += wr->sg_list[i].length;
83 }
84
85 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
86 1);
87 } else {
88 if (wr->num_sge <= 2) {
89 for (i = 0; i < wr->num_sge; i++) {
90 if (likely(wr->sg_list[i].length)) {
91 set_data_seg_v2(dseg, wr->sg_list + i);
92 dseg++;
93 }
94 }
95 } else {
96 roce_set_field(rc_sq_wqe->byte_20,
97 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
98 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
99 (*sge_ind) & (qp->sge.sge_cnt - 1));
100
101 for (i = 0; i < 2; i++) {
102 if (likely(wr->sg_list[i].length)) {
103 set_data_seg_v2(dseg, wr->sg_list + i);
104 dseg++;
105 }
106 }
107
108 dseg = get_send_extend_sge(qp,
109 (*sge_ind) & (qp->sge.sge_cnt - 1));
110
111 for (i = 0; i < wr->num_sge - 2; i++) {
112 if (likely(wr->sg_list[i + 2].length)) {
113 set_data_seg_v2(dseg,
114 wr->sg_list + 2 + i);
115 dseg++;
116 (*sge_ind)++;
117 }
118 }
119 }
120
121 roce_set_field(rc_sq_wqe->byte_16,
122 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
123 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
124 }
125
126 return 0;
127 }
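
/*
 * Worked example (illustrative): an RC send WQE holds at most two data
 * segments in the descriptor itself.  For a 5-SGE request, the routine
 * above writes sg_list[0..1] right after the RC header, then places
 * sg_list[2..4] in the extended SGE area located via MSG_START_SGE_IDX,
 * advancing *sge_ind by three.
 */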
128
129 static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
130 struct ib_send_wr **bad_wr)
131 {
132 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
133 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
134 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
135 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
136 struct hns_roce_qp *qp = to_hr_qp(ibqp);
137 struct hns_roce_v2_wqe_data_seg *dseg;
138 struct device *dev = hr_dev->dev;
139 struct hns_roce_v2_db sq_db;
140 unsigned int sge_ind = 0;
141 unsigned int owner_bit;
142 unsigned long flags;
143 unsigned int ind;
144 void *wqe = NULL;
145 u32 tmp_len = 0;
146 bool loopback;
147 int ret = 0;
148 u8 *smac;
149 int nreq;
150 int i;
151
152 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
153 ibqp->qp_type != IB_QPT_GSI &&
154 ibqp->qp_type != IB_QPT_UD)) {
155 dev_err(dev, "QP type 0x%x is not supported!\n", ibqp->qp_type);
156 *bad_wr = wr;
157 return -EOPNOTSUPP;
158 }
159
160 if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
161 qp->state == IB_QPS_RTR)) {
162 dev_err(dev, "Failed to post WQE, QP state %d is invalid!\n", qp->state);
163 *bad_wr = wr;
164 return -EINVAL;
165 }
166
167 spin_lock_irqsave(&qp->sq.lock, flags);
168 ind = qp->sq_next_wqe;
169 sge_ind = qp->next_sge;
170
171 for (nreq = 0; wr; ++nreq, wr = wr->next) {
172 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
173 ret = -ENOMEM;
174 *bad_wr = wr;
175 goto out;
176 }
177
178 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
179 dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
180 wr->num_sge, qp->sq.max_gs);
181 ret = -EINVAL;
182 *bad_wr = wr;
183 goto out;
184 }
185
186 wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
187 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
188 wr->wr_id;
189
190 owner_bit =
191 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
192
193 /* Build the WQE according to the QP type */
194 if (ibqp->qp_type == IB_QPT_GSI) {
195 ud_sq_wqe = wqe;
196 memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
197
198 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
199 V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
200 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
201 V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
202 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
203 V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
204 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
205 V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
206 roce_set_field(ud_sq_wqe->byte_48,
207 V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
208 V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
209 ah->av.mac[4]);
210 roce_set_field(ud_sq_wqe->byte_48,
211 V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
212 V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
213 ah->av.mac[5]);
214
215 /* MAC loopback */
216 smac = (u8 *)hr_dev->dev_addr[qp->port];
217 loopback = ether_addr_equal_unaligned(ah->av.mac,
218 smac) ? 1 : 0;
219
220 roce_set_bit(ud_sq_wqe->byte_40,
221 V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
222
223 roce_set_field(ud_sq_wqe->byte_4,
224 V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
225 V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
226 HNS_ROCE_V2_WQE_OP_SEND);
227
228 for (i = 0; i < wr->num_sge; i++)
229 tmp_len += wr->sg_list[i].length;
230
231 ud_sq_wqe->msg_len =
232 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
233
234 switch (wr->opcode) {
235 case IB_WR_SEND_WITH_IMM:
236 case IB_WR_RDMA_WRITE_WITH_IMM:
237 ud_sq_wqe->immtdata = wr->ex.imm_data;
238 break;
239 default:
240 ud_sq_wqe->immtdata = 0;
241 break;
242 }
243
244 /* Set signaled-completion attribute */
245 roce_set_bit(ud_sq_wqe->byte_4,
246 V2_UD_SEND_WQE_BYTE_4_CQE_S,
247 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
248
249 /* Set solicited-event attribute */
250 roce_set_bit(ud_sq_wqe->byte_4,
251 V2_UD_SEND_WQE_BYTE_4_SE_S,
252 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
253
254 roce_set_bit(ud_sq_wqe->byte_4,
255 V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
256
257 roce_set_field(ud_sq_wqe->byte_16,
258 V2_UD_SEND_WQE_BYTE_16_PD_M,
259 V2_UD_SEND_WQE_BYTE_16_PD_S,
260 to_hr_pd(ibqp->pd)->pdn);
261
262 roce_set_field(ud_sq_wqe->byte_16,
263 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
264 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
265 wr->num_sge);
266
267 roce_set_field(ud_sq_wqe->byte_20,
268 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
269 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
270 sge_ind & (qp->sge.sge_cnt - 1));
271
272 roce_set_field(ud_sq_wqe->byte_24,
273 V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
274 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
275 ud_sq_wqe->qkey =
276 cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
277 qp->qkey : ud_wr(wr)->remote_qkey);
278 roce_set_field(ud_sq_wqe->byte_32,
279 V2_UD_SEND_WQE_BYTE_32_DQPN_M,
280 V2_UD_SEND_WQE_BYTE_32_DQPN_S,
281 ud_wr(wr)->remote_qpn);
282
283 roce_set_field(ud_sq_wqe->byte_36,
284 V2_UD_SEND_WQE_BYTE_36_VLAN_M,
285 V2_UD_SEND_WQE_BYTE_36_VLAN_S,
286 le16_to_cpu(ah->av.vlan));
287 roce_set_field(ud_sq_wqe->byte_36,
288 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
289 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
290 ah->av.hop_limit);
291 roce_set_field(ud_sq_wqe->byte_36,
292 V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
293 V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
294 0);
299 roce_set_field(ud_sq_wqe->byte_40,
300 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
301 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
302 roce_set_field(ud_sq_wqe->byte_40,
303 V2_UD_SEND_WQE_BYTE_40_SL_M,
304 V2_UD_SEND_WQE_BYTE_40_SL_S,
305 le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
306 HNS_ROCE_SL_SHIFT);
307 roce_set_field(ud_sq_wqe->byte_40,
308 V2_UD_SEND_WQE_BYTE_40_PORTN_M,
309 V2_UD_SEND_WQE_BYTE_40_PORTN_S,
310 qp->port);
311
312 roce_set_field(ud_sq_wqe->byte_48,
313 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
314 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
315 hns_get_gid_index(hr_dev, qp->phy_port,
316 ah->av.gid_index));
317
318 memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
319 GID_LEN_V2);
320
321 dseg = get_send_extend_sge(qp,
322 sge_ind & (qp->sge.sge_cnt - 1));
323 for (i = 0; i < wr->num_sge; i++) {
324 set_data_seg_v2(dseg + i, wr->sg_list + i);
325 sge_ind++;
326 }
327
328 ind++;
329 } else if (ibqp->qp_type == IB_QPT_RC) {
330 rc_sq_wqe = wqe;
331 memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
332 for (i = 0; i < wr->num_sge; i++)
333 tmp_len += wr->sg_list[i].length;
334
335 rc_sq_wqe->msg_len =
336 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
337
338 switch (wr->opcode) {
339 case IB_WR_SEND_WITH_IMM:
340 case IB_WR_RDMA_WRITE_WITH_IMM:
341 rc_sq_wqe->immtdata = wr->ex.imm_data;
342 break;
343 case IB_WR_SEND_WITH_INV:
344 rc_sq_wqe->inv_key =
345 cpu_to_le32(wr->ex.invalidate_rkey);
346 break;
347 default:
348 rc_sq_wqe->immtdata = 0;
349 break;
350 }
351
352 roce_set_bit(rc_sq_wqe->byte_4,
353 V2_RC_SEND_WQE_BYTE_4_FENCE_S,
354 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
355
356 roce_set_bit(rc_sq_wqe->byte_4,
357 V2_RC_SEND_WQE_BYTE_4_SE_S,
358 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
359
360 roce_set_bit(rc_sq_wqe->byte_4,
361 V2_RC_SEND_WQE_BYTE_4_CQE_S,
362 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
363
364 roce_set_bit(rc_sq_wqe->byte_4,
365 V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
366
367 switch (wr->opcode) {
368 case IB_WR_RDMA_READ:
369 roce_set_field(rc_sq_wqe->byte_4,
370 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
371 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
372 HNS_ROCE_V2_WQE_OP_RDMA_READ);
373 rc_sq_wqe->rkey =
374 cpu_to_le32(rdma_wr(wr)->rkey);
375 rc_sq_wqe->va =
376 cpu_to_le64(rdma_wr(wr)->remote_addr);
377 break;
378 case IB_WR_RDMA_WRITE:
379 roce_set_field(rc_sq_wqe->byte_4,
380 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
381 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
382 HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
383 rc_sq_wqe->rkey =
384 cpu_to_le32(rdma_wr(wr)->rkey);
385 rc_sq_wqe->va =
386 cpu_to_le64(rdma_wr(wr)->remote_addr);
387 break;
388 case IB_WR_RDMA_WRITE_WITH_IMM:
389 roce_set_field(rc_sq_wqe->byte_4,
390 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
391 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
392 HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
393 rc_sq_wqe->rkey =
394 cpu_to_le32(rdma_wr(wr)->rkey);
395 rc_sq_wqe->va =
396 cpu_to_le64(rdma_wr(wr)->remote_addr);
397 break;
398 case IB_WR_SEND:
399 roce_set_field(rc_sq_wqe->byte_4,
400 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
401 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
402 HNS_ROCE_V2_WQE_OP_SEND);
403 break;
404 case IB_WR_SEND_WITH_INV:
405 roce_set_field(rc_sq_wqe->byte_4,
406 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
407 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
408 HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
409 break;
410 case IB_WR_SEND_WITH_IMM:
411 roce_set_field(rc_sq_wqe->byte_4,
412 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
413 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
414 HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
415 break;
416 case IB_WR_LOCAL_INV:
417 roce_set_field(rc_sq_wqe->byte_4,
418 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
419 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
420 HNS_ROCE_V2_WQE_OP_LOCAL_INV);
421 break;
422 case IB_WR_ATOMIC_CMP_AND_SWP:
423 roce_set_field(rc_sq_wqe->byte_4,
424 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
425 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
426 HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
427 break;
428 case IB_WR_ATOMIC_FETCH_AND_ADD:
429 roce_set_field(rc_sq_wqe->byte_4,
430 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
431 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
432 HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
433 break;
434 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
435 roce_set_field(rc_sq_wqe->byte_4,
436 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
437 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
438 HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
439 break;
440 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
441 roce_set_field(rc_sq_wqe->byte_4,
442 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
443 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
444 HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
445 break;
446 default:
447 roce_set_field(rc_sq_wqe->byte_4,
448 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
449 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
450 HNS_ROCE_V2_WQE_OP_MASK);
451 break;
452 }
453
454 wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
455 dseg = wqe;
456
457 ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
458 &sge_ind, bad_wr);
459 if (ret)
460 goto out;
461 ind++;
462 } else {
463 dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
464 spin_unlock_irqrestore(&qp->sq.lock, flags);
465 *bad_wr = wr;
466 return -EOPNOTSUPP;
467 }
468 }
469
470 out:
471 if (likely(nreq)) {
472 qp->sq.head += nreq;
473 /* Memory barrier */
474 wmb();
475
476 sq_db.byte_4 = 0;
477 sq_db.parameter = 0;
478
479 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
480 V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
481 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
482 V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
483 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
484 V2_DB_PARAMETER_CONS_IDX_S,
485 qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
486 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
487 V2_DB_PARAMETER_SL_S, qp->sl);
488
489 hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
490
491 qp->sq_next_wqe = ind;
492 qp->next_sge = sge_ind;
493 }
494
495 spin_unlock_irqrestore(&qp->sq.lock, flags);
496
497 return ret;
498 }
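
/*
 * Illustrative sketch (hypothetical consumer, not driver code): how a
 * kernel ULP reaches hns_roce_v2_post_send() through the ib_core entry
 * point.  The function name "example_post_one_send" and its parameters
 * are assumptions for illustration only.
 */
static int example_post_one_send(struct ib_qp *qp, u64 addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id		= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
		.sg_list	= &sge,
		.num_sge	= 1,
	};
	struct ib_send_wr *bad_wr;

	/* ib_post_send() dispatches to hr_dev->hw->post_send() above */
	return ib_post_send(qp, &wr, &bad_wr);
}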
499
500 static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
501 struct ib_recv_wr **bad_wr)
502 {
503 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
504 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
505 struct hns_roce_v2_wqe_data_seg *dseg;
506 struct hns_roce_rinl_sge *sge_list;
507 struct device *dev = hr_dev->dev;
508 unsigned long flags;
509 void *wqe = NULL;
510 int ret = 0;
511 int nreq;
512 int ind;
513 int i;
514
515 spin_lock_irqsave(&hr_qp->rq.lock, flags);
516 ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
517
518 if (hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_ERR) {
519 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
520 *bad_wr = wr;
521 return -EINVAL;
522 }
523
524 for (nreq = 0; wr; ++nreq, wr = wr->next) {
525 if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
526 hr_qp->ibqp.recv_cq)) {
527 ret = -ENOMEM;
528 *bad_wr = wr;
529 goto out;
530 }
531
532 if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
533 dev_err(dev, "rq: num_sge=%d > hr_qp->rq.max_gs=%d\n",
534 wr->num_sge, hr_qp->rq.max_gs);
535 ret = -EINVAL;
536 *bad_wr = wr;
537 goto out;
538 }
539
540 wqe = get_recv_wqe(hr_qp, ind);
541 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
542 for (i = 0; i < wr->num_sge; i++) {
543 if (!wr->sg_list[i].length)
544 continue;
545 set_data_seg_v2(dseg, wr->sg_list + i);
546 dseg++;
547 }
548
549 if (i < hr_qp->rq.max_gs) {
550 dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
551 dseg->addr = 0;
552 }
553
554 /* rq support inline data */
555 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
556 sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
557 hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
558 (u32)wr->num_sge;
559 for (i = 0; i < wr->num_sge; i++) {
560 sge_list[i].addr =
561 (void *)(u64)wr->sg_list[i].addr;
562 sge_list[i].len = wr->sg_list[i].length;
563 }
564 }
565
566 hr_qp->rq.wrid[ind] = wr->wr_id;
567
568 ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
569 }
570
571 out:
572 if (likely(nreq)) {
573 hr_qp->rq.head += nreq;
574 /* Memory barrier */
575 wmb();
576
577 *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
578 }
579 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
580
581 return ret;
582 }
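
/*
 * Companion sketch to the one after post_send (hypothetical, not driver
 * code): posting a single receive buffer, which ib_core dispatches to
 * hns_roce_v2_post_recv() above.
 */
static int example_post_one_recv(struct ib_qp *qp, u64 addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id		= 1,
		.sg_list	= &sge,
		.num_sge	= 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}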
583
584 static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
585 {
586 int ntu = ring->next_to_use;
587 int ntc = ring->next_to_clean;
588 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
589
590 return ring->desc_num - used - 1;
591 }
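
/*
 * Worked example (illustrative): with desc_num = 1024, next_to_use = 10
 * and next_to_clean = 1000, used = (10 - 1000 + 1024) % 1024 = 34, so
 * 1024 - 34 - 1 = 989 descriptors may still be posted.  One slot is kept
 * permanently free so that head == tail unambiguously means "empty".
 */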
592
593 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
594 struct hns_roce_v2_cmq_ring *ring)
595 {
596 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
597
598 ring->desc = kzalloc(size, GFP_KERNEL);
599 if (!ring->desc)
600 return -ENOMEM;
601
602 ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
603 DMA_BIDIRECTIONAL);
604 if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
605 ring->desc_dma_addr = 0;
606 kfree(ring->desc);
607 ring->desc = NULL;
608 return -ENOMEM;
609 }
610
611 return 0;
612 }
613
614 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
615 struct hns_roce_v2_cmq_ring *ring)
616 {
617 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
618 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
619 DMA_BIDIRECTIONAL);
620 kfree(ring->desc);
621 }
622
623 static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
624 {
625 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
626 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
627 &priv->cmq.csq : &priv->cmq.crq;
628
629 ring->flag = ring_type;
630 ring->next_to_clean = 0;
631 ring->next_to_use = 0;
632
633 return hns_roce_alloc_cmq_desc(hr_dev, ring);
634 }
635
636 static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
637 {
638 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
639 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
640 &priv->cmq.csq : &priv->cmq.crq;
641 dma_addr_t dma = ring->desc_dma_addr;
642
643 if (ring_type == TYPE_CSQ) {
644 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
645 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
646 upper_32_bits(dma));
647 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
648 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
649 HNS_ROCE_CMQ_ENABLE);
650 roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
651 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
652 } else {
653 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
654 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
655 upper_32_bits(dma));
656 roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
657 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
658 HNS_ROCE_CMQ_ENABLE);
659 roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
660 roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
661 }
662 }
663
664 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
665 {
666 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
667 int ret;
668
669 /* Setup the queue entries for command queue */
670 priv->cmq.csq.desc_num = 1024;
671 priv->cmq.crq.desc_num = 1024;
672
673 /* Setup the lock for command queue */
674 spin_lock_init(&priv->cmq.csq.lock);
675 spin_lock_init(&priv->cmq.crq.lock);
676
677 /* Setup Tx write back timeout */
678 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
679
680 /* Init CSQ */
681 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
682 if (ret) {
683 dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
684 return ret;
685 }
686
687 /* Init CRQ */
688 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
689 if (ret) {
690 dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
691 goto err_crq;
692 }
693
694 /* Init CSQ REG */
695 hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
696
697 /* Init CRQ REG */
698 hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
699
700 return 0;
701
702 err_crq:
703 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
704
705 return ret;
706 }
707
708 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
709 {
710 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
711
712 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
713 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
714 }
715
716 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
717 enum hns_roce_opcode_type opcode,
718 bool is_read)
719 {
720 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
721 desc->opcode = cpu_to_le16(opcode);
722 desc->flag =
723 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
724 if (is_read)
725 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
726 else
727 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
728 }
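
/*
 * Illustrative note on flag composition: a read-direction descriptor
 * leaves this helper with (HNS_ROCE_CMD_FLAG_NO_INTR |
 * HNS_ROCE_CMD_FLAG_IN | HNS_ROCE_CMD_FLAG_WR) set, while a
 * write-direction descriptor carries only NO_INTR | IN;
 * hns_roce_cmq_query_hw_info() below shows the typical read call pattern.
 */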
729
730 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
731 {
732 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
733 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
734
735 return head == priv->cmq.csq.next_to_use;
736 }
737
738 static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
739 {
740 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
741 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
742 struct hns_roce_cmq_desc *desc;
743 u16 ntc = csq->next_to_clean;
744 u32 head;
745 int clean = 0;
746
747 desc = &csq->desc[ntc];
748 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
749 while (head != ntc) {
750 memset(desc, 0, sizeof(*desc));
751 ntc++;
752 if (ntc == csq->desc_num)
753 ntc = 0;
754 desc = &csq->desc[ntc];
755 clean++;
756 }
757 csq->next_to_clean = ntc;
758
759 return clean;
760 }
761
762 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
763 struct hns_roce_cmq_desc *desc, int num)
764 {
765 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
766 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
767 struct hns_roce_cmq_desc *desc_to_use;
768 bool complete = false;
769 u32 timeout = 0;
770 int handle = 0;
771 u16 desc_ret;
772 int ret = 0;
773 int ntc;
774
775 spin_lock_bh(&csq->lock);
776
777 if (num > hns_roce_cmq_space(csq)) {
778 spin_unlock_bh(&csq->lock);
779 return -EBUSY;
780 }
781
782 /*
783 * Record where these descriptors start in the CSQ, so that the
784 * hardware write-back can be collected from the same slots.
785 */
786 ntc = csq->next_to_use;
787
788 while (handle < num) {
789 desc_to_use = &csq->desc[csq->next_to_use];
790 *desc_to_use = desc[handle];
791 dev_dbg(hr_dev->dev, "set cmq desc:\n");
792 csq->next_to_use++;
793 if (csq->next_to_use == csq->desc_num)
794 csq->next_to_use = 0;
795 handle++;
796 }
797
798 /* Write to hardware */
799 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
800
801 /*
802 * If the command is synchronous, wait for the firmware to write
803 * back; if multiple descriptors were sent, check using the first one.
804 */
805 if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
806 do {
807 if (hns_roce_cmq_csq_done(hr_dev))
808 break;
809 udelay(1);
810 timeout++;
811 } while (timeout < priv->cmq.tx_timeout);
812 }
813
814 if (hns_roce_cmq_csq_done(hr_dev)) {
815 complete = true;
816 handle = 0;
817 while (handle < num) {
818 /* get the result of hardware write back */
819 desc_to_use = &csq->desc[ntc];
820 desc[handle] = *desc_to_use;
821 dev_dbg(hr_dev->dev, "Get cmq desc:\n");
822 desc_ret = desc[handle].retval;
823 if (desc_ret == CMD_EXEC_SUCCESS)
824 ret = 0;
825 else
826 ret = -EIO;
827 priv->cmq.last_status = desc_ret;
828 ntc++;
829 handle++;
830 if (ntc == csq->desc_num)
831 ntc = 0;
832 }
833 }
834
835 if (!complete)
836 ret = -EAGAIN;
837
838 /* clean the command send queue */
839 handle = hns_roce_cmq_csq_clean(hr_dev);
840 if (handle != num)
841 dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
842 handle, num);
843
844 spin_unlock_bh(&csq->lock);
845
846 return ret;
847 }
848
849 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
850 {
851 struct hns_roce_query_version *resp;
852 struct hns_roce_cmq_desc desc;
853 int ret;
854
855 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
856 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
857 if (ret)
858 return ret;
859
860 resp = (struct hns_roce_query_version *)desc.data;
861 hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
862 hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
863
864 return 0;
865 }
866
867 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
868 {
869 struct hns_roce_cfg_global_param *req;
870 struct hns_roce_cmq_desc desc;
871
872 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
873 false);
874
875 req = (struct hns_roce_cfg_global_param *)desc.data;
876 memset(req, 0, sizeof(*req));
877 roce_set_field(req->time_cfg_udp_port,
878 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
879 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
880 roce_set_field(req->time_cfg_udp_port,
881 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
882 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
883
884 return hns_roce_cmq_send(hr_dev, &desc, 1);
885 }
886
887 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
888 {
889 struct hns_roce_cmq_desc desc[2];
890 struct hns_roce_pf_res *res;
891 int ret;
892 int i;
893
894 for (i = 0; i < 2; i++) {
895 hns_roce_cmq_setup_basic_desc(&desc[i],
896 HNS_ROCE_OPC_QUERY_PF_RES, true);
897
898 if (i == 0)
899 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
900 else
901 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
902 }
903
904 ret = hns_roce_cmq_send(hr_dev, desc, 2);
905 if (ret)
906 return ret;
907
908 res = (struct hns_roce_pf_res *)desc[0].data;
909
910 hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
911 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
912 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
913 hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
914 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
915 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
916 hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
917 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
918 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
919 hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
920 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
921 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
922
923 return 0;
924 }
925
926 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
927 {
928 struct hns_roce_cmq_desc desc[2];
929 struct hns_roce_vf_res_a *req_a;
930 struct hns_roce_vf_res_b *req_b;
931 int i;
932
933 req_a = (struct hns_roce_vf_res_a *)desc[0].data;
934 req_b = (struct hns_roce_vf_res_b *)desc[1].data;
935 memset(req_a, 0, sizeof(*req_a));
936 memset(req_b, 0, sizeof(*req_b));
937 for (i = 0; i < 2; i++) {
938 hns_roce_cmq_setup_basic_desc(&desc[i],
939 HNS_ROCE_OPC_ALLOC_VF_RES, false);
940
941 if (i == 0)
942 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
943 else
944 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
945
946 if (i == 0) {
947 roce_set_field(req_a->vf_qpc_bt_idx_num,
948 VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
949 VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
950 roce_set_field(req_a->vf_qpc_bt_idx_num,
951 VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
952 VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
953 HNS_ROCE_VF_QPC_BT_NUM);
954
955 roce_set_field(req_a->vf_srqc_bt_idx_num,
956 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
957 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
958 roce_set_field(req_a->vf_srqc_bt_idx_num,
959 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
960 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
961 HNS_ROCE_VF_SRQC_BT_NUM);
962
963 roce_set_field(req_a->vf_cqc_bt_idx_num,
964 VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
965 VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
966 roce_set_field(req_a->vf_cqc_bt_idx_num,
967 VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
968 VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
969 HNS_ROCE_VF_CQC_BT_NUM);
970
971 roce_set_field(req_a->vf_mpt_bt_idx_num,
972 VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
973 VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
974 roce_set_field(req_a->vf_mpt_bt_idx_num,
975 VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
976 VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
977 HNS_ROCE_VF_MPT_BT_NUM);
978
979 roce_set_field(req_a->vf_eqc_bt_idx_num,
980 VF_RES_A_DATA_5_VF_EQC_IDX_M,
981 VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
982 roce_set_field(req_a->vf_eqc_bt_idx_num,
983 VF_RES_A_DATA_5_VF_EQC_NUM_M,
984 VF_RES_A_DATA_5_VF_EQC_NUM_S,
985 HNS_ROCE_VF_EQC_NUM);
986 } else {
987 roce_set_field(req_b->vf_smac_idx_num,
988 VF_RES_B_DATA_1_VF_SMAC_IDX_M,
989 VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
990 roce_set_field(req_b->vf_smac_idx_num,
991 VF_RES_B_DATA_1_VF_SMAC_NUM_M,
992 VF_RES_B_DATA_1_VF_SMAC_NUM_S,
993 HNS_ROCE_VF_SMAC_NUM);
994
995 roce_set_field(req_b->vf_sgid_idx_num,
996 VF_RES_B_DATA_2_VF_SGID_IDX_M,
997 VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
998 roce_set_field(req_b->vf_sgid_idx_num,
999 VF_RES_B_DATA_2_VF_SGID_NUM_M,
1000 VF_RES_B_DATA_2_VF_SGID_NUM_S,
1001 HNS_ROCE_VF_SGID_NUM);
1002
1003 roce_set_field(req_b->vf_qid_idx_sl_num,
1004 VF_RES_B_DATA_3_VF_QID_IDX_M,
1005 VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1006 roce_set_field(req_b->vf_qid_idx_sl_num,
1007 VF_RES_B_DATA_3_VF_SL_NUM_M,
1008 VF_RES_B_DATA_3_VF_SL_NUM_S,
1009 HNS_ROCE_VF_SL_NUM);
1010 }
1011 }
1012
1013 return hns_roce_cmq_send(hr_dev, desc, 2);
1014 }
1015
1016 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1017 {
1018 u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1019 u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1020 u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1021 u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1022 struct hns_roce_cfg_bt_attr *req;
1023 struct hns_roce_cmq_desc desc;
1024
1025 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1026 req = (struct hns_roce_cfg_bt_attr *)desc.data;
1027 memset(req, 0, sizeof(*req));
1028
1029 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1030 CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1031 hr_dev->caps.qpc_ba_pg_sz);
1032 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1033 CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1034 hr_dev->caps.qpc_buf_pg_sz);
1035 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1036 CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1037 qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1038
1039 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1040 CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1041 hr_dev->caps.srqc_ba_pg_sz);
1042 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1043 CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1044 hr_dev->caps.srqc_buf_pg_sz);
1045 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1046 CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1047 srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1048
1049 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1050 CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1051 hr_dev->caps.cqc_ba_pg_sz);
1052 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1053 CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1054 hr_dev->caps.cqc_buf_pg_sz);
1055 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1056 CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1057 cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1058
1059 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1060 CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1061 hr_dev->caps.mpt_ba_pg_sz);
1062 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1063 CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1064 hr_dev->caps.mpt_buf_pg_sz);
1065 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1066 CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1067 mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1068
1069 return hns_roce_cmq_send(hr_dev, &desc, 1);
1070 }
1071
1072 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1073 {
1074 struct hns_roce_caps *caps = &hr_dev->caps;
1075 int ret;
1076
1077 ret = hns_roce_cmq_query_hw_info(hr_dev);
1078 if (ret) {
1079 dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
1080 ret);
1081 return ret;
1082 }
1083
1084 ret = hns_roce_config_global_param(hr_dev);
1085 if (ret) {
1086 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1087 ret);
1088 return ret;
1089 }
1090
1091 /* Query the PF resources owned by this function */
1092 ret = hns_roce_query_pf_resource(hr_dev);
1093 if (ret) {
1094 dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1095 ret);
1096 return ret;
1097 }
1098
1099 ret = hns_roce_alloc_vf_resource(hr_dev);
1100 if (ret) {
1101 dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
1102 ret);
1103 return ret;
1104 }
1105
1106 hr_dev->vendor_part_id = 0;
1107 hr_dev->sys_image_guid = 0;
1108
1109 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1110 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1111 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1112 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1113 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1114 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1115 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
1116 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1117 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1118 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1119 caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
1120 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1121 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1122 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1123 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1124 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1125 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1126 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1127 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1128 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1129 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1130 caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
1131 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1132 caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
1133 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1134 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1135 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1136 caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
1137 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1138 caps->reserved_lkey = 0;
1139 caps->reserved_pds = 0;
1140 caps->reserved_mrws = 1;
1141 caps->reserved_uars = 0;
1142 caps->reserved_cqs = 0;
1143
1144 caps->qpc_ba_pg_sz = 0;
1145 caps->qpc_buf_pg_sz = 0;
1146 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1147 caps->srqc_ba_pg_sz = 0;
1148 caps->srqc_buf_pg_sz = 0;
1149 caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
1150 caps->cqc_ba_pg_sz = 0;
1151 caps->cqc_buf_pg_sz = 0;
1152 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1153 caps->mpt_ba_pg_sz = 0;
1154 caps->mpt_buf_pg_sz = 0;
1155 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1156 caps->pbl_ba_pg_sz = 0;
1157 caps->pbl_buf_pg_sz = 0;
1158 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
1159 caps->mtt_ba_pg_sz = 0;
1160 caps->mtt_buf_pg_sz = 0;
1161 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
1162 caps->cqe_ba_pg_sz = 0;
1163 caps->cqe_buf_pg_sz = 0;
1164 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
1165 caps->eqe_ba_pg_sz = 0;
1166 caps->eqe_buf_pg_sz = 0;
1167 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
1168 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1169
1170 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
1171 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1172 HNS_ROCE_CAP_FLAG_RQ_INLINE |
1173 HNS_ROCE_CAP_FLAG_RECORD_DB;
1174 caps->pkey_table_len[0] = 1;
1175 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1176 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
1177 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
1178 caps->local_ca_ack_delay = 0;
1179 caps->max_mtu = IB_MTU_4096;
1180
1181 ret = hns_roce_v2_set_bt(hr_dev);
1182 if (ret)
1183 dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
1184 ret);
1185
1186 return ret;
1187 }
1188
1189 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1190 {
1191 u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1192
1193 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1194 }
1195
1196 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1197 {
1198 u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1199
1200 return status & HNS_ROCE_HW_MB_STATUS_MASK;
1201 }
1202
1203 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1204 u64 out_param, u32 in_modifier, u8 op_modifier,
1205 u16 op, u16 token, int event)
1206 {
1207 struct device *dev = hr_dev->dev;
1208 u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
1209 ROCEE_VF_MB_CFG0_REG);
1210 unsigned long end;
1211 u32 val0 = 0;
1212 u32 val1 = 0;
1213
1214 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
1215 while (hns_roce_v2_cmd_pending(hr_dev)) {
1216 if (time_after(jiffies, end)) {
1217 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
1218 (int)end);
1219 return -EAGAIN;
1220 }
1221 cond_resched();
1222 }
1223
1224 roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
1225 HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
1226 roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
1227 HNS_ROCE_VF_MB4_CMD_SHIFT, op);
1228 roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
1229 HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
1230 roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
1231 HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
1232
1233 writeq(in_param, hcr + 0);
1234 writeq(out_param, hcr + 2);
1235
1236 /* Memory barrier */
1237 wmb();
1238
1239 writel(val0, hcr + 4);
1240 writel(val1, hcr + 5);
1241
1242 mmiowb();
1243
1244 return 0;
1245 }
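
/*
 * Illustrative summary of the mailbox register layout used above: hcr is
 * a u32 window, so writeq(in_param, hcr + 0) fills CFG0/CFG1,
 * writeq(out_param, hcr + 2) fills CFG2/CFG3, and val0/val1 land in
 * CFG4/CFG5 carrying the tag, command, event flag and token fields.
 */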
1246
1247 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
1248 unsigned long timeout)
1249 {
1250 struct device *dev = hr_dev->dev;
1251 unsigned long end = 0;
1252 u32 status;
1253
1254 end = msecs_to_jiffies(timeout) + jiffies;
1255 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
1256 cond_resched();
1257
1258 if (hns_roce_v2_cmd_pending(hr_dev)) {
1259 dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1260 return -ETIMEDOUT;
1261 }
1262
1263 status = hns_roce_v2_cmd_complete(hr_dev);
1264 if (status != 0x1) {
1265 dev_err(dev, "mailbox status 0x%x!\n", status);
1266 return -EBUSY;
1267 }
1268
1269 return 0;
1270 }
1271
1272 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1273 int gid_index, union ib_gid *gid,
1274 const struct ib_gid_attr *attr)
1275 {
1276 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
1277 u32 *p;
1278 u32 val;
1279
1280 if (!gid || !attr)
1281 return -EINVAL;
1282
1283 if (attr->gid_type == IB_GID_TYPE_ROCE)
1284 sgid_type = GID_TYPE_FLAG_ROCE_V1;
1285
1286 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
1287 if (ipv6_addr_v4mapped((void *)gid))
1288 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
1289 else
1290 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
1291 }
1292
1293 p = (u32 *)&gid->raw[0];
1294 roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
1295 0x20 * gid_index);
1296
1297 p = (u32 *)&gid->raw[4];
1298 roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
1299 0x20 * gid_index);
1300
1301 p = (u32 *)&gid->raw[8];
1302 roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
1303 0x20 * gid_index);
1304
1305 p = (u32 *)&gid->raw[0xc];
1306 roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
1307 0x20 * gid_index);
1308
1309 val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
1310 roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
1311 ROCEE_VF_SGID_CFG4_SGID_TYPE_S, sgid_type);
1312
1313 roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);
1314
1315 return 0;
1316 }
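
/*
 * Illustrative register-layout example: each SGID table entry occupies a
 * 0x20-byte window, so entry 3 is written at CFG0..CFG3 + 0x60 with the
 * 16-byte GID split into four 32-bit words, and its RoCE version goes
 * into the SGID_TYPE field of CFG4 + 0x60.
 */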
1317
1318 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1319 u8 *addr)
1320 {
1321 u16 reg_smac_h;
1322 u32 reg_smac_l;
1323 u32 val;
1324
1325 reg_smac_l = *(u32 *)(&addr[0]);
1326 roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
1327 0x08 * phy_port);
1328 val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);
1329
1330 reg_smac_h = *(u16 *)(&addr[4]);
1331 roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
1332 ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
1333 roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);
1334
1335 return 0;
1336 }
1337
1338 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1339 unsigned long mtpt_idx)
1340 {
1341 struct hns_roce_v2_mpt_entry *mpt_entry;
1342 struct scatterlist *sg;
1343 u64 page_addr;
1344 u64 *pages;
1345 int i, j;
1346 int len;
1347 int entry;
1348
1349 mpt_entry = mb_buf;
1350 memset(mpt_entry, 0, sizeof(*mpt_entry));
1351
1352 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
1353 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
1354 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
1355 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
1356 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
1357 roce_set_field(mpt_entry->byte_4_pd_hop_st,
1358 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
1359 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
1360 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1361 V2_MPT_BYTE_4_PD_S, mr->pd);
1362 mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
1363
1364 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
1365 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
1366 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
1367 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
1368 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1369 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
1370 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1371 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1372 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1373 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1374 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1375 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1376 mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
1377
1378 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1379 mr->type == MR_TYPE_MR ? 0 : 1);
1380 mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
1381
1382 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
1383 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
1384 mpt_entry->lkey = cpu_to_le32(mr->key);
1385 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
1386 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
1387
1388 if (mr->type == MR_TYPE_DMA)
1389 return 0;
1390
1391 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1392
1393 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1394 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
1395 V2_MPT_BYTE_48_PBL_BA_H_S,
1396 upper_32_bits(mr->pbl_ba >> 3));
1397 mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
1398
1399 pages = (u64 *)__get_free_page(GFP_KERNEL);
1400 if (!pages)
1401 return -ENOMEM;
1402
1403 i = 0;
1404 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1405 len = sg_dma_len(sg) >> PAGE_SHIFT;
1406 for (j = 0; j < len; ++j) {
1407 page_addr = sg_dma_address(sg) +
1408 (j << mr->umem->page_shift);
1409 pages[i] = page_addr >> 6;
1410
1411 /* Only the first two entries are recorded directly in the MTPT */
1412 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1413 goto found;
1414 i++;
1415 }
1416 }
1417
1418 found:
1419 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1420 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1421 V2_MPT_BYTE_56_PA0_H_S,
1422 upper_32_bits(pages[0]));
1423 mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
1424
1425 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1426 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1427 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1428
1429 free_page((unsigned long)pages);
1430
1431 roce_set_field(mpt_entry->byte_64_buf_pa1,
1432 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1433 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
1434 mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
1435
1436 return 0;
1437 }
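
/*
 * Illustrative note: only the first two page addresses (pages[0] and
 * pages[1]) are cached directly in the MTPT entry, which is why the scan
 * above stops at HNS_ROCE_V2_MAX_INNER_MTPT_NUM; the remaining pages are
 * reached through the PBL base address programmed into byte_48_mode_ba.
 */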
1438
1439 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1440 struct hns_roce_mr *mr, int flags,
1441 u32 pdn, int mr_access_flags, u64 iova,
1442 u64 size, void *mb_buf)
1443 {
1444 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
1445
1446 if (flags & IB_MR_REREG_PD) {
1447 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1448 V2_MPT_BYTE_4_PD_S, pdn);
1449 mr->pd = pdn;
1450 }
1451
1452 if (flags & IB_MR_REREG_ACCESS) {
1453 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1454 V2_MPT_BYTE_8_BIND_EN_S,
1455 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
1456 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1457 V2_MPT_BYTE_8_ATOMIC_EN_S,
1458 (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
1459 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1460 (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
1461 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1462 (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1463 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1464 (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1465 }
1466
1467 if (flags & IB_MR_REREG_TRANS) {
1468 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
1469 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
1470 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
1471 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
1472
1473 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1474 mpt_entry->pbl_ba_l =
1475 cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1476 roce_set_field(mpt_entry->byte_48_mode_ba,
1477 V2_MPT_BYTE_48_PBL_BA_H_M,
1478 V2_MPT_BYTE_48_PBL_BA_H_S,
1479 upper_32_bits(mr->pbl_ba >> 3));
1480 mpt_entry->byte_48_mode_ba =
1481 cpu_to_le32(mpt_entry->byte_48_mode_ba);
1482
1483 mr->iova = iova;
1484 mr->size = size;
1485 }
1486
1487 return 0;
1488 }
1489
1490 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1491 {
1492 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1493 n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
1494 }
1495
1496 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1497 {
1498 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
1499
1500 /* Return the CQE only when its owner bit is the inverse of the cons_idx wrap bit (MSB) */
1501 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
1502 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
1503 }
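
/*
 * Worked example (illustrative): for a CQ of 256 entries (ib_cq.cqe =
 * 255), "n & (cqe + 1)" extracts the wrap bit of the consumer index, so
 * indices 0..255 accept a CQE whose owner bit is 1 and indices 256..511
 * accept owner bit 0, flipping on every wrap of the ring.
 */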
1504
1505 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
1506 {
1507 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
1508 }
1509
1510 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1511 {
1512 *hr_cq->set_ci_db = cons_index & 0xffffff;
1513 }
1514
1515 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1516 struct hns_roce_srq *srq)
1517 {
1518 struct hns_roce_v2_cqe *cqe, *dest;
1519 u32 prod_index;
1520 int nfreed = 0;
1521 u8 owner_bit;
1522
1523 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
1524 ++prod_index) {
1525 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
1526 break;
1527 }
1528
1529 /*
1530 * Now backwards through the CQ, removing CQ entries
1531 * that match our QP by overwriting them with next entries.
1532 */
1533 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
1534 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
1535 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
1536 V2_CQE_BYTE_16_LCL_QPN_S) &
1537 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
1538 /* SRQ is not supported, so nothing extra to free for this CQE */
1539 ++nfreed;
1540 } else if (nfreed) {
1541 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
1542 hr_cq->ib_cq.cqe);
1543 owner_bit = roce_get_bit(dest->byte_4,
1544 V2_CQE_BYTE_4_OWNER_S);
1545 memcpy(dest, cqe, sizeof(*cqe));
1546 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
1547 owner_bit);
1548 }
1549 }
1550
1551 if (nfreed) {
1552 hr_cq->cons_index += nfreed;
1553 /*
1554 * Make sure update of buffer contents is done before
1555 * updating consumer index.
1556 */
1557 wmb();
1558 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
1559 }
1560 }
1561
1562 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1563 struct hns_roce_srq *srq)
1564 {
1565 spin_lock_irq(&hr_cq->lock);
1566 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
1567 spin_unlock_irq(&hr_cq->lock);
1568 }
1569
1570 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
1571 struct hns_roce_cq *hr_cq, void *mb_buf,
1572 u64 *mtts, dma_addr_t dma_handle, int nent,
1573 u32 vector)
1574 {
1575 struct hns_roce_v2_cq_context *cq_context;
1576
1577 cq_context = mb_buf;
1578 memset(cq_context, 0, sizeof(*cq_context));
1579
1580 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
1581 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
1582 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
1583 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
1584 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
1585 V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
1586 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
1587 V2_CQC_BYTE_4_CEQN_S, vector);
1588 cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
1589
1590 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
1591 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
1592
1593 cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
1594 cq_context->cqe_cur_blk_addr =
1595 cpu_to_le32(cq_context->cqe_cur_blk_addr);
1596
1597 roce_set_field(cq_context->byte_16_hop_addr,
1598 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
1599 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
1600 cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
1601 roce_set_field(cq_context->byte_16_hop_addr,
1602 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
1603 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
1604 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
1605
1606 cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
1607 roce_set_field(cq_context->byte_24_pgsz_addr,
1608 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
1609 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
1610 cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
1611 roce_set_field(cq_context->byte_24_pgsz_addr,
1612 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
1613 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
1614 hr_dev->caps.cqe_ba_pg_sz);
1615 roce_set_field(cq_context->byte_24_pgsz_addr,
1616 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
1617 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
1618 hr_dev->caps.cqe_buf_pg_sz);
1619
1620 cq_context->cqe_ba = (u32)(dma_handle >> 3);
1621
1622 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
1623 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
1624
1625 if (hr_cq->db_en)
1626 roce_set_bit(cq_context->byte_44_db_record,
1627 V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
1628
1629 roce_set_field(cq_context->byte_44_db_record,
1630 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
1631 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
1632 ((u32)hr_cq->db.dma) >> 1);
1633 cq_context->db_record_addr = hr_cq->db.dma >> 32;
1634
1635 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
1636 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
1637 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
1638 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
1639 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
1640 V2_CQC_BYTE_56_CQ_PERIOD_M,
1641 V2_CQC_BYTE_56_CQ_PERIOD_S,
1642 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
1643 }
1644
1645 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
1646 enum ib_cq_notify_flags flags)
1647 {
1648 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
1649 u32 notification_flag;
1650 u32 doorbell[2];
1651
1652 doorbell[0] = 0;
1653 doorbell[1] = 0;
1654
1655 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
1656 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
1657 /*
1658 * flags = 0: notification flag = 1, notify on next completion
1659 * flags = 1: notification flag = 0, notify on solicited only
1660 */
1661 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
1662 hr_cq->cqn);
1663 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
1664 HNS_ROCE_V2_CQ_DB_NTR);
1665 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
1666 V2_CQ_DB_PARAMETER_CONS_IDX_S,
1667 hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
1668 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
1669 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
1670 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
1671 notification_flag);
1672
1673 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
1674
1675 return 0;
1676 }
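
/*
 * Illustrative usage (hypothetical consumer): a ULP typically re-arms the
 * CQ from its completion handler with ib_req_notify_cq(cq,
 * IB_CQ_NEXT_COMP), which lands in the routine above and rings the
 * notification doorbell with the current consumer index and arm_sn.
 */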
1677
1678 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
1679 struct hns_roce_qp **cur_qp,
1680 struct ib_wc *wc)
1681 {
1682 struct hns_roce_rinl_sge *sge_list;
1683 u32 wr_num, wr_cnt, sge_num;
1684 u32 sge_cnt, data_len, size;
1685 void *wqe_buf;
1686
1687 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
1688 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
1689 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
1690
1691 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
1692 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
1693 wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
1694 data_len = wc->byte_len;
1695
1696 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
1697 size = min(sge_list[sge_cnt].len, data_len);
1698 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
1699
1700 data_len -= size;
1701 wqe_buf += size;
1702 }
1703
1704 if (data_len) {
1705 wc->status = IB_WC_LOC_LEN_ERR;
1706 return -EAGAIN;
1707 }
1708
1709 return 0;
1710 }
1711
1712 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
1713 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
1714 {
1715 struct hns_roce_dev *hr_dev;
1716 struct hns_roce_v2_cqe *cqe;
1717 struct hns_roce_qp *hr_qp;
1718 struct hns_roce_wq *wq;
1719 int is_send;
1720 u16 wqe_ctr;
1721 u32 opcode;
1722 u32 status;
1723 int qpn;
1724 int ret;
1725
1726 /* Find cqe according to consumer index */
1727 cqe = next_cqe_sw_v2(hr_cq);
1728 if (!cqe)
1729 return -EAGAIN;
1730
1731 ++hr_cq->cons_index;
1732 /* Memory barrier */
1733 rmb();
1734
1735 /* 0->SQ, 1->RQ */
1736 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
1737
1738 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
1739 V2_CQE_BYTE_16_LCL_QPN_S);
1740
1741 if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
1742 hr_dev = to_hr_dev(hr_cq->ib_cq.device);
1743 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
1744 if (unlikely(!hr_qp)) {
1745 dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
1746 hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
1747 return -EINVAL;
1748 }
1749 *cur_qp = hr_qp;
1750 }
1751
1752 wc->qp = &(*cur_qp)->ibqp;
1753 wc->vendor_err = 0;
1754
1755 status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
1756 V2_CQE_BYTE_4_STATUS_S);
1757 switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
1758 case HNS_ROCE_CQE_V2_SUCCESS:
1759 wc->status = IB_WC_SUCCESS;
1760 break;
1761 case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
1762 wc->status = IB_WC_LOC_LEN_ERR;
1763 break;
1764 case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
1765 wc->status = IB_WC_LOC_QP_OP_ERR;
1766 break;
1767 case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
1768 wc->status = IB_WC_LOC_PROT_ERR;
1769 break;
1770 case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
1771 wc->status = IB_WC_WR_FLUSH_ERR;
1772 break;
1773 case HNS_ROCE_CQE_V2_MW_BIND_ERR:
1774 wc->status = IB_WC_MW_BIND_ERR;
1775 break;
1776 case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
1777 wc->status = IB_WC_BAD_RESP_ERR;
1778 break;
1779 case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
1780 wc->status = IB_WC_LOC_ACCESS_ERR;
1781 break;
1782 case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
1783 wc->status = IB_WC_REM_INV_REQ_ERR;
1784 break;
1785 case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
1786 wc->status = IB_WC_REM_ACCESS_ERR;
1787 break;
1788 case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
1789 wc->status = IB_WC_REM_OP_ERR;
1790 break;
1791 case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
1792 wc->status = IB_WC_RETRY_EXC_ERR;
1793 break;
1794 case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
1795 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
1796 break;
1797 case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
1798 wc->status = IB_WC_REM_ABORT_ERR;
1799 break;
1800 default:
1801 wc->status = IB_WC_GENERAL_ERR;
1802 break;
1803 }
1804
1805 /* Return directly on a CQE status error */
1806 if (wc->status != IB_WC_SUCCESS)
1807 return 0;
1808
1809 if (is_send) {
1810 wc->wc_flags = 0;
1811 /* The CQE corresponds to the SQ */
1812 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
1813 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
1814 case HNS_ROCE_SQ_OPCODE_SEND:
1815 wc->opcode = IB_WC_SEND;
1816 break;
1817 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
1818 wc->opcode = IB_WC_SEND;
1819 break;
1820 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
1821 wc->opcode = IB_WC_SEND;
1822 wc->wc_flags |= IB_WC_WITH_IMM;
1823 break;
1824 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
1825 wc->opcode = IB_WC_RDMA_READ;
1826 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
1827 break;
1828 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
1829 wc->opcode = IB_WC_RDMA_WRITE;
1830 break;
1831 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
1832 wc->opcode = IB_WC_RDMA_WRITE;
1833 wc->wc_flags |= IB_WC_WITH_IMM;
1834 break;
1835 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
1836 wc->opcode = IB_WC_LOCAL_INV;
1837 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
1838 break;
1839 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
1840 wc->opcode = IB_WC_COMP_SWAP;
1841 wc->byte_len = 8;
1842 break;
1843 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
1844 wc->opcode = IB_WC_FETCH_ADD;
1845 wc->byte_len = 8;
1846 break;
1847 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
1848 wc->opcode = IB_WC_MASKED_COMP_SWAP;
1849 wc->byte_len = 8;
1850 break;
1851 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
1852 wc->opcode = IB_WC_MASKED_FETCH_ADD;
1853 wc->byte_len = 8;
1854 break;
1855 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
1856 wc->opcode = IB_WC_REG_MR;
1857 break;
1858 case HNS_ROCE_SQ_OPCODE_BIND_MW:
1859 wc->opcode = IB_WC_REG_MR;
1860 break;
1861 default:
1862 wc->status = IB_WC_GENERAL_ERR;
1863 break;
1864 }
1865
1866 wq = &(*cur_qp)->sq;
1867 if ((*cur_qp)->sq_signal_bits) {
1868 /*
1869 * If sq_signal_bits is set, the tail pointer is
1870 * first moved to the wqe that the current cqe
1871 * corresponds to.
1872 */
1873 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
1874 V2_CQE_BYTE_4_WQE_INDX_M,
1875 V2_CQE_BYTE_4_WQE_INDX_S);
1876 wq->tail += (wqe_ctr - (u16)wq->tail) &
1877 (wq->wqe_cnt - 1);
1878 }
1879
1880 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
1881 ++wq->tail;
1882 } else {
1883 /* The CQE corresponds to the RQ */
1884 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
1885
1886 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
1887 V2_CQE_BYTE_4_OPCODE_S);
1888 switch (opcode & 0x1f) {
1889 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
1890 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
1891 wc->wc_flags = IB_WC_WITH_IMM;
1892 wc->ex.imm_data = cqe->immtdata;
1893 break;
1894 case HNS_ROCE_V2_OPCODE_SEND:
1895 wc->opcode = IB_WC_RECV;
1896 wc->wc_flags = 0;
1897 break;
1898 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
1899 wc->opcode = IB_WC_RECV;
1900 wc->wc_flags = IB_WC_WITH_IMM;
1901 wc->ex.imm_data = cqe->immtdata;
1902 break;
1903 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
1904 wc->opcode = IB_WC_RECV;
1905 wc->wc_flags = IB_WC_WITH_INVALIDATE;
1906 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
1907 break;
1908 default:
1909 wc->status = IB_WC_GENERAL_ERR;
1910 break;
1911 }
1912
1913 if ((wc->qp->qp_type == IB_QPT_RC ||
1914 wc->qp->qp_type == IB_QPT_UC) &&
1915 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
1916 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
1917 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
1918 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
1919 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
1920 if (ret)
1921 return -EAGAIN;
1922 }
1923
1924 /* Update tail pointer, record wr_id */
1925 wq = &(*cur_qp)->rq;
1926 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
1927 ++wq->tail;
1928
1929 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
1930 V2_CQE_BYTE_32_SL_S);
1931 wc->src_qp = roce_get_field(cqe->byte_32,
1932 V2_CQE_BYTE_32_RMT_QPN_M,
1933 V2_CQE_BYTE_32_RMT_QPN_S);
1934 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
1935 V2_CQE_BYTE_32_GRH_S) ?
1936 IB_WC_GRH : 0);
1937 wc->port_num = roce_get_field(cqe->byte_32,
1938 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
1939 wc->pkey_index = 0;
1940 memcpy(wc->smac, cqe->smac, 4);
1941 wc->smac[4] = roce_get_field(cqe->byte_28,
1942 V2_CQE_BYTE_28_SMAC_4_M,
1943 V2_CQE_BYTE_28_SMAC_4_S);
1944 wc->smac[5] = roce_get_field(cqe->byte_28,
1945 V2_CQE_BYTE_28_SMAC_5_M,
1946 V2_CQE_BYTE_28_SMAC_5_S);
1947 wc->vlan_id = 0xffff;
1948 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
1949 wc->network_hdr_type = roce_get_field(cqe->byte_28,
1950 V2_CQE_BYTE_28_PORT_TYPE_M,
1951 V2_CQE_BYTE_28_PORT_TYPE_S);
1952 }
1953
1954 return 0;
1955 }
1956
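/*
 * Poll up to num_entries completions while holding the CQ lock. The
 * consumer index is published to hardware only once, after a wmb()
 * that orders the CQE reads before the index update.
 */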
1957 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
1958 struct ib_wc *wc)
1959 {
1960 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
1961 struct hns_roce_qp *cur_qp = NULL;
1962 unsigned long flags;
1963 int npolled;
1964
1965 spin_lock_irqsave(&hr_cq->lock, flags);
1966
1967 for (npolled = 0; npolled < num_entries; ++npolled) {
1968 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
1969 break;
1970 }
1971
1972 if (npolled) {
1973 /* Memory barrier */
1974 wmb();
1975 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
1976 }
1977
1978 spin_unlock_irqrestore(&hr_cq->lock, flags);
1979
1980 return npolled;
1981 }
1982
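/*
 * Write one level of a multi-hop HEM base-address table to hardware
 * through a mailbox command. The mhop calculation turns obj into
 * (l0, l1, l2) indexes, and step_idx selects which BT level the
 * WRITE_*_BT0 command addresses.
 */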
1983 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
1984 struct hns_roce_hem_table *table, int obj,
1985 int step_idx)
1986 {
1987 struct device *dev = hr_dev->dev;
1988 struct hns_roce_cmd_mailbox *mailbox;
1989 struct hns_roce_hem_iter iter;
1990 struct hns_roce_hem_mhop mhop;
1991 struct hns_roce_hem *hem;
1992 unsigned long mhop_obj = obj;
1993 int i, j, k;
1994 int ret = 0;
1995 u64 hem_idx = 0;
1996 u64 l1_idx = 0;
1997 u64 bt_ba = 0;
1998 u32 chunk_ba_num;
1999 u32 hop_num;
2000 u16 op = 0xff;
2001
2002 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2003 return 0;
2004
2005 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2006 i = mhop.l0_idx;
2007 j = mhop.l1_idx;
2008 k = mhop.l2_idx;
2009 hop_num = mhop.hop_num;
2010 chunk_ba_num = mhop.bt_chunk_size / 8;
2011
2012 if (hop_num == 2) {
2013 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2014 k;
2015 l1_idx = i * chunk_ba_num + j;
2016 } else if (hop_num == 1) {
2017 hem_idx = i * chunk_ba_num + j;
2018 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2019 hem_idx = i;
2020 }
2021
2022 switch (table->type) {
2023 case HEM_TYPE_QPC:
2024 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2025 break;
2026 case HEM_TYPE_MTPT:
2027 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2028 break;
2029 case HEM_TYPE_CQC:
2030 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2031 break;
2032 case HEM_TYPE_SRQC:
2033 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
2034 break;
2035 default:
2036 dev_warn(dev, "Table %d cannot be written via mailbox!\n",
2037 table->type);
2038 return 0;
2039 }
2040 op += step_idx;
2041
2042 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2043 if (IS_ERR(mailbox))
2044 return PTR_ERR(mailbox);
2045
2046 if (check_whether_last_step(hop_num, step_idx)) {
2047 hem = table->hem[hem_idx];
2048 for (hns_roce_hem_first(hem, &iter);
2049 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2050 bt_ba = hns_roce_hem_addr(&iter);
2051
2052 /* configure the ba, tag, and op */
2053 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2054 obj, 0, op,
2055 HNS_ROCE_CMD_TIMEOUT_MSECS);
2056 }
2057 } else {
2058 if (step_idx == 0)
2059 bt_ba = table->bt_l0_dma_addr[i];
2060 else if (step_idx == 1 && hop_num == 2)
2061 bt_ba = table->bt_l1_dma_addr[l1_idx];
2062
2063 /* configure the ba, tag, and op */
2064 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
2065 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
2066 }
2067
2068 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2069 return ret;
2070 }
2071
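/*
 * Counterpart of hns_roce_v2_set_hem(): issue a DESTROY_*_BT command
 * for the given table entry. Table types that are not multi-hop are
 * skipped with a return value of 0.
 */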
2072 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
2073 struct hns_roce_hem_table *table, int obj,
2074 int step_idx)
2075 {
2076 struct device *dev = hr_dev->dev;
2077 struct hns_roce_cmd_mailbox *mailbox;
2078 int ret = 0;
2079 u16 op = 0xff;
2080
2081 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2082 return 0;
2083
2084 switch (table->type) {
2085 case HEM_TYPE_QPC:
2086 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
2087 break;
2088 case HEM_TYPE_MTPT:
2089 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
2090 break;
2091 case HEM_TYPE_CQC:
2092 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
2093 break;
2094 case HEM_TYPE_SRQC:
2095 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
2096 break;
2097 default:
2098 dev_warn(dev, "Table %d cannot be destroyed via mailbox!\n",
2099 table->type);
2100 return 0;
2101 }
2102 op += step_idx;
2103
2104 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2105 if (IS_ERR(mailbox))
2106 return PTR_ERR(mailbox);
2107
2108 /* configure the tag and op */
2109 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
2110 HNS_ROCE_CMD_TIMEOUT_MSECS);
2111
2112 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2113 return ret;
2114 }
2115
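/*
 * Mail a MODIFY_QPC command to hardware. The mailbox buffer holds two
 * contexts back to back: the new QPC followed by the mask that tells
 * hardware which fields to take (a field is updated only when its
 * mask bits are cleared to 0).
 */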
2116 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
2117 struct hns_roce_mtt *mtt,
2118 enum ib_qp_state cur_state,
2119 enum ib_qp_state new_state,
2120 struct hns_roce_v2_qp_context *context,
2121 struct hns_roce_qp *hr_qp)
2122 {
2123 struct hns_roce_cmd_mailbox *mailbox;
2124 int ret;
2125
2126 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2127 if (IS_ERR(mailbox))
2128 return PTR_ERR(mailbox);
2129
2130 memcpy(mailbox->buf, context, sizeof(*context) * 2);
2131
2132 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2133 HNS_ROCE_CMD_MODIFY_QPC,
2134 HNS_ROCE_CMD_TIMEOUT_MSECS);
2135
2136 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2137
2138 return ret;
2139 }
2140
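/*
 * Fold the requested (or currently cached) remote access flags into
 * the QPC. When the responder has no rd_atomic resources, everything
 * except REMOTE_WRITE is masked off.
 */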
2141 static void set_access_flags(struct hns_roce_qp *hr_qp,
2142 struct hns_roce_v2_qp_context *context,
2143 struct hns_roce_v2_qp_context *qpc_mask,
2144 const struct ib_qp_attr *attr, int attr_mask)
2145 {
2146 u8 dest_rd_atomic;
2147 u32 access_flags;
2148
2149 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
2150 attr->max_dest_rd_atomic : hr_qp->resp_depth;
2151
2152 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
2153 attr->qp_access_flags : hr_qp->atomic_rd_en;
2154
2155 if (!dest_rd_atomic)
2156 access_flags &= IB_ACCESS_REMOTE_WRITE;
2157
2158 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2159 !!(access_flags & IB_ACCESS_REMOTE_READ));
2160 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
2161
2162 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2163 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
2164 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
2165
2166 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2167 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
2168 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
2169 }
2170
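/*
 * RESET -> INIT: build a complete QPC from scratch - QP type, PD,
 * WQE shifts, doorbell record address, CQNs and (optionally) the
 * SRQ - and clear the mask bits of every field hardware must latch.
 */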
2171 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
2172 const struct ib_qp_attr *attr,
2173 int attr_mask,
2174 struct hns_roce_v2_qp_context *context,
2175 struct hns_roce_v2_qp_context *qpc_mask)
2176 {
2177 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2178 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2179
2180 /*
2181 * In the v2 engine, software passes both the context and a context
2182 * mask to hardware when modifying a qp. To modify a field in the
2183 * context, all bits of that field in the context mask must be
2184 * cleared to 0; fields left unchanged keep their mask bits as 0x1.
2185 */
2186 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2187 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2188 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2189 V2_QPC_BYTE_4_TST_S, 0);
2190
2191 if (ibqp->qp_type == IB_QPT_GSI)
2192 roce_set_field(context->byte_4_sqpn_tst,
2193 V2_QPC_BYTE_4_SGE_SHIFT_M,
2194 V2_QPC_BYTE_4_SGE_SHIFT_S,
2195 ilog2((unsigned int)hr_qp->sge.sge_cnt));
2196 else
2197 roce_set_field(context->byte_4_sqpn_tst,
2198 V2_QPC_BYTE_4_SGE_SHIFT_M,
2199 V2_QPC_BYTE_4_SGE_SHIFT_S,
2200 hr_qp->sq.max_gs > 2 ?
2201 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2202
2203 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2204 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2205
2206 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2207 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2208 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2209 V2_QPC_BYTE_4_SQPN_S, 0);
2210
2211 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2212 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2213 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2214 V2_QPC_BYTE_16_PD_S, 0);
2215
2216 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2217 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
2218 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2219 V2_QPC_BYTE_20_RQWS_S, 0);
2220
2221 roce_set_field(context->byte_20_smac_sgid_idx,
2222 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2223 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2224 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2225 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2226
2227 roce_set_field(context->byte_20_smac_sgid_idx,
2228 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2229 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2230 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2231 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2232
2233 /* With no VLAN, the VLAN index must be set to 0xFFF */
2234 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
2235 V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
2236 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
2237 V2_QPC_BYTE_24_VLAN_IDX_S, 0);
2238
2239 /*
2240 * Clear some fields in the context. Because the default values of
2241 * all fields in the context are already zero, we need not set them
2242 * to 0 again, but we must clear the relevant fields in the mask.
2243 */
2244 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
2245 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
2246 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
2247 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
2248
2249 roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
2250 V2_QPC_BYTE_60_MAPID_S, 0);
2251
2252 roce_set_bit(qpc_mask->byte_60_qpst_mapid,
2253 V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
2254 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
2255 0);
2256 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
2257 0);
2258 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
2259 0);
2260 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
2261 0);
2262 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
2263 0);
2264 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
2265 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
2266
2267 if (attr_mask & IB_QP_QKEY) {
2268 context->qkey_xrcd = attr->qkey;
2269 qpc_mask->qkey_xrcd = 0;
2270 hr_qp->qkey = attr->qkey;
2271 }
2272
2273 if (hr_qp->rdb_en) {
2274 roce_set_bit(context->byte_68_rq_db,
2275 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
2276 roce_set_bit(qpc_mask->byte_68_rq_db,
2277 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
2278 }
2279
2280 roce_set_field(context->byte_68_rq_db,
2281 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2282 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
2283 ((u32)hr_qp->rdb.dma) >> 1);
2284 roce_set_field(qpc_mask->byte_68_rq_db,
2285 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2286 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
2287 context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
2288 qpc_mask->rq_db_record_addr = 0;
2289
2290 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
2291 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
2292 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
2293
2294 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2295 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2296 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2297 V2_QPC_BYTE_80_RX_CQN_S, 0);
2298 if (ibqp->srq) {
2299 roce_set_field(context->byte_76_srqn_op_en,
2300 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2301 to_hr_srq(ibqp->srq)->srqn);
2302 roce_set_field(qpc_mask->byte_76_srqn_op_en,
2303 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2304 roce_set_bit(context->byte_76_srqn_op_en,
2305 V2_QPC_BYTE_76_SRQ_EN_S, 1);
2306 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2307 V2_QPC_BYTE_76_SRQ_EN_S, 0);
2308 }
2309
2310 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2311 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2312 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
2313 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2314 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
2315 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
2316
2317 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
2318 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
2319
2320 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
2321 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
2322
2323 roce_set_field(qpc_mask->byte_104_rq_sge,
2324 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
2325 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
2326
2327 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2328 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
2329 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2330 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
2331 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
2332 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2333 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
2334
2335 qpc_mask->rq_rnr_timer = 0;
2336 qpc_mask->rx_msg_len = 0;
2337 qpc_mask->rx_rkey_pkt_info = 0;
2338 qpc_mask->rx_va = 0;
2339
2340 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
2341 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
2342 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
2343 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
2344
2345 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
2346 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
2347 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
2348 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
2349 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
2350
2351 roce_set_field(qpc_mask->byte_144_raq,
2352 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
2353 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
2354 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
2355 0);
2356 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
2357 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
2358 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
2359
2360 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
2361 V2_QPC_BYTE_148_RQ_MSN_S, 0);
2362 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
2363 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
2364
2365 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2366 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
2367 roce_set_field(qpc_mask->byte_152_raq,
2368 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
2369 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
2370
2371 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
2372 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
2373
2374 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2375 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
2376 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
2377 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2378 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
2379 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
2380
2381 roce_set_field(context->byte_168_irrl_idx,
2382 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2383 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
2384 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2385 roce_set_field(qpc_mask->byte_168_irrl_idx,
2386 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2387 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
2388
2389 roce_set_bit(qpc_mask->byte_168_irrl_idx,
2390 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
2391 roce_set_bit(qpc_mask->byte_168_irrl_idx,
2392 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
2393 roce_set_field(qpc_mask->byte_168_irrl_idx,
2394 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
2395 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
2396
2397 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2398 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
2399 roce_set_field(qpc_mask->byte_172_sq_psn,
2400 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2401 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
2402
2403 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
2404 0);
2405
2406 roce_set_field(qpc_mask->byte_176_msg_pktn,
2407 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
2408 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
2409 roce_set_field(qpc_mask->byte_176_msg_pktn,
2410 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
2411 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
2412
2413 roce_set_field(qpc_mask->byte_184_irrl_idx,
2414 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
2415 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
2416
2417 qpc_mask->cur_sge_offset = 0;
2418
2419 roce_set_field(qpc_mask->byte_192_ext_sge,
2420 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
2421 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
2422 roce_set_field(qpc_mask->byte_192_ext_sge,
2423 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
2424 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
2425
2426 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
2427 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
2428
2429 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
2430 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
2431 roce_set_field(qpc_mask->byte_200_sq_max,
2432 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
2433 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
2434
2435 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
2436 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
2437
2438 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
2439 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
2440
2441 qpc_mask->sq_timer = 0;
2442
2443 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
2444 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
2445 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
2446 roce_set_field(qpc_mask->byte_232_irrl_sge,
2447 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
2448 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
2449
2450 qpc_mask->irrl_cur_sge_offset = 0;
2451
2452 roce_set_field(qpc_mask->byte_240_irrl_tail,
2453 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
2454 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
2455 roce_set_field(qpc_mask->byte_240_irrl_tail,
2456 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
2457 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
2458 roce_set_field(qpc_mask->byte_240_irrl_tail,
2459 V2_QPC_BYTE_240_RX_ACK_MSN_M,
2460 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
2461
2462 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
2463 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
2464 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
2465 0);
2466 roce_set_field(qpc_mask->byte_248_ack_psn,
2467 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
2468 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
2469 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
2470 0);
2471 roce_set_bit(qpc_mask->byte_248_ack_psn,
2472 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
2473 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
2474 0);
2475
2476 hr_qp->access_flags = attr->qp_access_flags;
2477 hr_qp->pkey_index = attr->pkey_index;
2478 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2479 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2480 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2481 V2_QPC_BYTE_252_TX_CQN_S, 0);
2482
2483 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
2484 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
2485
2486 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2487 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
2488 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
2489 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2490 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
2491 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
2492 }
2493
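/*
 * INIT -> INIT: re-program only the attributes that may change in
 * this transition (access flags, PD, CQNs, qkey), leaving path and
 * PSN state untouched.
 */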
2494 static void modify_qp_init_to_init(struct ib_qp *ibqp,
2495 const struct ib_qp_attr *attr, int attr_mask,
2496 struct hns_roce_v2_qp_context *context,
2497 struct hns_roce_v2_qp_context *qpc_mask)
2498 {
2499 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2500
2501 /*
2502 * In the v2 engine, software passes both the context and a context
2503 * mask to hardware when modifying a qp. To modify a field in the
2504 * context, all bits of that field in the context mask must be
2505 * cleared to 0; fields left unchanged keep their mask bits as 0x1.
2506 */
2507 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2508 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2509 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2510 V2_QPC_BYTE_4_TST_S, 0);
2511
2512 if (ibqp->qp_type == IB_QPT_GSI)
2513 roce_set_field(context->byte_4_sqpn_tst,
2514 V2_QPC_BYTE_4_SGE_SHIFT_M,
2515 V2_QPC_BYTE_4_SGE_SHIFT_S,
2516 ilog2((unsigned int)hr_qp->sge.sge_cnt));
2517 else
2518 roce_set_field(context->byte_4_sqpn_tst,
2519 V2_QPC_BYTE_4_SGE_SHIFT_M,
2520 V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
2521 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2522
2523 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2524 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2525
2526 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2527 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2528 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2529 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2530 0);
2531
2532 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2533 !!(attr->qp_access_flags &
2534 IB_ACCESS_REMOTE_WRITE));
2535 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2536 0);
2537
2538 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2539 !!(attr->qp_access_flags &
2540 IB_ACCESS_REMOTE_ATOMIC));
2541 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2542 0);
2543 } else {
2544 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2545 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
2546 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2547 0);
2548
2549 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2550 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
2551 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2552 0);
2553
2554 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2555 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
2556 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2557 0);
2558 }
2559
2560 roce_set_field(context->byte_20_smac_sgid_idx,
2561 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2562 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2563 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2564 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2565
2566 roce_set_field(context->byte_20_smac_sgid_idx,
2567 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2568 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2569 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2570 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2571
2572 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2573 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2574 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2575 V2_QPC_BYTE_16_PD_S, 0);
2576
2577 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2578 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2579 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2580 V2_QPC_BYTE_80_RX_CQN_S, 0);
2581
2582 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2583 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2584 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2585 V2_QPC_BYTE_252_TX_CQN_S, 0);
2586
2587 if (ibqp->srq) {
2588 roce_set_bit(context->byte_76_srqn_op_en,
2589 V2_QPC_BYTE_76_SRQ_EN_S, 1);
2590 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2591 V2_QPC_BYTE_76_SRQ_EN_S, 0);
2592 roce_set_field(context->byte_76_srqn_op_en,
2593 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2594 to_hr_srq(ibqp->srq)->srqn);
2595 roce_set_field(qpc_mask->byte_76_srqn_op_en,
2596 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2597 }
2598
2599 if (attr_mask & IB_QP_QKEY) {
2600 context->qkey_xrcd = attr->qkey;
2601 qpc_mask->qkey_xrcd = 0;
2602 }
2603
2604 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2605 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2606 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2607 V2_QPC_BYTE_4_SQPN_S, 0);
2608
2609 if (attr_mask & IB_QP_DEST_QPN) {
2610 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
2611 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
2612 roce_set_field(qpc_mask->byte_56_dqpn_err,
2613 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
2614 }
2615 roce_set_field(context->byte_168_irrl_idx,
2616 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2617 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
2618 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2619 roce_set_field(qpc_mask->byte_168_irrl_idx,
2620 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2621 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
2622 }
2623
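/*
 * INIT -> RTR: resolve the QP buffer, IRRL and TRRL base addresses,
 * then program the receive path: GID index, dmac, hop limit, flow
 * label, MTU and rq_psn. IB_QP_ALT_PATH is rejected since alternate
 * paths are not supported.
 */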
2624 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
2625 const struct ib_qp_attr *attr, int attr_mask,
2626 struct hns_roce_v2_qp_context *context,
2627 struct hns_roce_v2_qp_context *qpc_mask)
2628 {
2629 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2630 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2631 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2632 struct device *dev = hr_dev->dev;
2633 dma_addr_t dma_handle_3;
2634 dma_addr_t dma_handle_2;
2635 dma_addr_t dma_handle;
2636 u32 page_size;
2637 u8 port_num;
2638 u64 *mtts_3;
2639 u64 *mtts_2;
2640 u64 *mtts;
2641 u8 *dmac;
2642 u8 *smac;
2643 int port;
2644
2645 /* Search qp buf's mtts */
2646 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2647 hr_qp->mtt.first_seg, &dma_handle);
2648 if (!mtts) {
2649 dev_err(dev, "qp buf pa find failed\n");
2650 return -EINVAL;
2651 }
2652
2653 /* Search IRRL's mtts */
2654 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2655 hr_qp->qpn, &dma_handle_2);
2656 if (!mtts_2) {
2657 dev_err(dev, "qp irrl_table find failed\n");
2658 return -EINVAL;
2659 }
2660
2661 /* Search TRRL's mtts */
2662 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
2663 hr_qp->qpn, &dma_handle_3);
2664 if (!mtts_3) {
2665 dev_err(dev, "qp trrl_table find failed\n");
2666 return -EINVAL;
2667 }
2668
2669 if (attr_mask & IB_QP_ALT_PATH) {
2670 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
2671 return -EINVAL;
2672 }
2673
2674 dmac = (u8 *)attr->ah_attr.roce.dmac;
2675 context->wqe_sge_ba = (u32)(dma_handle >> 3);
2676 qpc_mask->wqe_sge_ba = 0;
2677
2678 /*
2679 * In the v2 engine, software passes both the context and a context
2680 * mask to hardware when modifying a qp. To modify a field in the
2681 * context, all bits of that field in the context mask must be
2682 * cleared to 0; fields left unchanged keep their mask bits as 0x1.
2683 */
2684 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
2685 V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
2686 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
2687 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
2688
2689 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
2690 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
2691 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
2692 0 : hr_dev->caps.mtt_hop_num);
2693 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
2694 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
2695
2696 roce_set_field(context->byte_20_smac_sgid_idx,
2697 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
2698 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
2699 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
2700 hr_dev->caps.mtt_hop_num : 0);
2701 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2702 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
2703 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
2704
2705 roce_set_field(context->byte_20_smac_sgid_idx,
2706 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
2707 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
2708 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
2709 0 : hr_dev->caps.mtt_hop_num);
2710 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2711 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
2712 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
2713
2714 roce_set_field(context->byte_16_buf_ba_pg_sz,
2715 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
2716 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
2717 hr_dev->caps.mtt_ba_pg_sz);
2718 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
2719 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
2720 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
2721
2722 roce_set_field(context->byte_16_buf_ba_pg_sz,
2723 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
2724 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
2725 hr_dev->caps.mtt_buf_pg_sz);
2726 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
2727 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
2728 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
2729
2730 roce_set_field(context->byte_80_rnr_rx_cqn,
2731 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
2732 V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
2733 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
2734 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
2735 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
2736
2737 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
2738 context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
2739 >> PAGE_ADDR_SHIFT);
2740 qpc_mask->rq_cur_blk_addr = 0;
2741
2742 roce_set_field(context->byte_92_srq_info,
2743 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
2744 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
2745 mtts[hr_qp->rq.offset / page_size]
2746 >> (32 + PAGE_ADDR_SHIFT));
2747 roce_set_field(qpc_mask->byte_92_srq_info,
2748 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
2749 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
2750
2751 context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
2752 >> PAGE_ADDR_SHIFT);
2753 qpc_mask->rq_nxt_blk_addr = 0;
2754
2755 roce_set_field(context->byte_104_rq_sge,
2756 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
2757 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
2758 mtts[hr_qp->rq.offset / page_size + 1]
2759 >> (32 + PAGE_ADDR_SHIFT));
2760 roce_set_field(qpc_mask->byte_104_rq_sge,
2761 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
2762 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
2763
2764 roce_set_field(context->byte_108_rx_reqepsn,
2765 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
2766 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
2767 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2768 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
2769 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
2770
2771 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
2772 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
2773 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
2774 V2_QPC_BYTE_132_TRRL_BA_S, 0);
2775 context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
2776 qpc_mask->trrl_ba = 0;
2777 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
2778 V2_QPC_BYTE_140_TRRL_BA_S,
2779 (u32)(dma_handle_3 >> (32 + 16 + 4)));
2780 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
2781 V2_QPC_BYTE_140_TRRL_BA_S, 0);
2782
2783 context->irrl_ba = (u32)(dma_handle_2 >> 6);
2784 qpc_mask->irrl_ba = 0;
2785 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
2786 V2_QPC_BYTE_208_IRRL_BA_S,
2787 dma_handle_2 >> (32 + 6));
2788 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
2789 V2_QPC_BYTE_208_IRRL_BA_S, 0);
2790
2791 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
2792 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
2793
2794 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
2795 hr_qp->sq_signal_bits);
2796 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
2797 0);
2798
2799 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
2800
2801 smac = (u8 *)hr_dev->dev_addr[port];
2802 /* Enable loopback when dmac equals smac or when loop_idc is 1 */
2803 if (ether_addr_equal_unaligned(dmac, smac) ||
2804 hr_dev->loop_idc == 0x1) {
2805 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
2806 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
2807 }
2808
2809 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
2810 attr->max_dest_rd_atomic) {
2811 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
2812 V2_QPC_BYTE_140_RR_MAX_S,
2813 fls(attr->max_dest_rd_atomic - 1));
2814 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
2815 V2_QPC_BYTE_140_RR_MAX_S, 0);
2816 }
2817
2818 if (attr_mask & IB_QP_DEST_QPN) {
2819 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
2820 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
2821 roce_set_field(qpc_mask->byte_56_dqpn_err,
2822 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
2823 }
2824
2825 /* Configure GID index */
2826 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2827 roce_set_field(context->byte_20_smac_sgid_idx,
2828 V2_QPC_BYTE_20_SGID_IDX_M,
2829 V2_QPC_BYTE_20_SGID_IDX_S,
2830 hns_get_gid_index(hr_dev, port_num - 1,
2831 grh->sgid_index));
2832 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2833 V2_QPC_BYTE_20_SGID_IDX_M,
2834 V2_QPC_BYTE_20_SGID_IDX_S, 0);
2835 memcpy(&(context->dmac), dmac, 4);
2836 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
2837 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
2838 qpc_mask->dmac = 0;
2839 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
2840 V2_QPC_BYTE_52_DMAC_S, 0);
2841
2842 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
2843 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
2844 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
2845 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
2846
2847 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
2848 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
2849 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
2850 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
2851
2852 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
2853 V2_QPC_BYTE_28_FL_S, grh->flow_label);
2854 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
2855 V2_QPC_BYTE_28_FL_S, 0);
2856
2857 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
2858 V2_QPC_BYTE_24_TC_S, grh->traffic_class);
2859 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
2860 V2_QPC_BYTE_24_TC_S, 0);
2861
2862 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
2863 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
2864 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
2865 else if (attr_mask & IB_QP_PATH_MTU)
2866 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
2867 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
2868
2869 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
2870 V2_QPC_BYTE_24_MTU_S, 0);
2871
2872 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
2873 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
2874
2875 roce_set_field(context->byte_84_rq_ci_pi,
2876 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2877 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
2878 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2879 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2880 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
2881
2882 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2883 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
2884 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
2885 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2886 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
2887 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
2888 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
2889 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2890 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
2891 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
2892
2893 context->rq_rnr_timer = 0;
2894 qpc_mask->rq_rnr_timer = 0;
2895
2896 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2897 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
2898 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2899 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
2900
2901 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
2902 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
2903 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
2904 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
2905
2906 roce_set_field(context->byte_168_irrl_idx,
2907 V2_QPC_BYTE_168_LP_SGEN_INI_M,
2908 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
2909 roce_set_field(qpc_mask->byte_168_irrl_idx,
2910 V2_QPC_BYTE_168_LP_SGEN_INI_M,
2911 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
2912
2913 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
2914 V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
2915 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
2916 V2_QPC_BYTE_28_SL_S, 0);
2917 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
2918
2919 return 0;
2920 }
2921
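/*
 * RTR -> RTS: program the send path - SQ block addresses and all
 * sq_psn-derived fields - together with the retry and RNR-retry
 * counters. Alternate path and path migration are rejected here as
 * well.
 */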
2922 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
2923 const struct ib_qp_attr *attr, int attr_mask,
2924 struct hns_roce_v2_qp_context *context,
2925 struct hns_roce_v2_qp_context *qpc_mask)
2926 {
2927 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2928 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2929 struct device *dev = hr_dev->dev;
2930 dma_addr_t dma_handle;
2931 u32 page_size;
2932 u64 *mtts;
2933
2934 /* Search qp buf's mtts */
2935 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2936 hr_qp->mtt.first_seg, &dma_handle);
2937 if (!mtts) {
2938 dev_err(dev, "qp buf pa find failed\n");
2939 return -EINVAL;
2940 }
2941
2942 /* Alternate path and path migration are not supported */
2943 if ((attr_mask & IB_QP_ALT_PATH) ||
2944 (attr_mask & IB_QP_PATH_MIG_STATE)) {
2945 dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
2946 return -EINVAL;
2947 }
2948
2949 /*
2950 * In the v2 engine, software passes both the context and a context
2951 * mask to hardware when modifying a qp. To modify a field in the
2952 * context, all bits of that field in the context mask must be
2953 * cleared to 0; fields left unchanged keep their mask bits as 0x1.
2954 */
2955 roce_set_field(context->byte_60_qpst_mapid,
2956 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
2957 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
2958 roce_set_field(qpc_mask->byte_60_qpst_mapid,
2959 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
2960 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
2961
2962 context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
2963 roce_set_field(context->byte_168_irrl_idx,
2964 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
2965 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
2966 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
2967 qpc_mask->sq_cur_blk_addr = 0;
2968 roce_set_field(qpc_mask->byte_168_irrl_idx,
2969 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
2970 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
2971
2972 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
2973 context->sq_cur_sge_blk_addr =
2974 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
2975 ((u32)(mtts[hr_qp->sge.offset / page_size]
2976 >> PAGE_ADDR_SHIFT)) : 0;
2977 roce_set_field(context->byte_184_irrl_idx,
2978 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
2979 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
2980 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
2981 (mtts[hr_qp->sge.offset / page_size] >>
2982 (32 + PAGE_ADDR_SHIFT)) : 0);
2983 qpc_mask->sq_cur_sge_blk_addr = 0;
2984 roce_set_field(qpc_mask->byte_184_irrl_idx,
2985 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
2986 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
2987
2988 context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
2989 roce_set_field(context->byte_232_irrl_sge,
2990 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
2991 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
2992 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
2993 qpc_mask->rx_sq_cur_blk_addr = 0;
2994 roce_set_field(qpc_mask->byte_232_irrl_sge,
2995 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
2996 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
2997
2998 /*
2999 * Clear some fields in the context. Because the default values of
3000 * all fields in the context are already zero, we need not set them
3001 * to 0 again, but we must clear the relevant fields in the mask.
3002 */
3003 roce_set_field(qpc_mask->byte_232_irrl_sge,
3004 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3005 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3006
3007 roce_set_field(qpc_mask->byte_240_irrl_tail,
3008 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3009 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3010
3011 roce_set_field(context->byte_244_rnr_rxack,
3012 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3013 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
3014 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3015 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3016 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
3017
3018 roce_set_field(qpc_mask->byte_248_ack_psn,
3019 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3020 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3021 roce_set_bit(qpc_mask->byte_248_ack_psn,
3022 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3023 roce_set_field(qpc_mask->byte_248_ack_psn,
3024 V2_QPC_BYTE_248_IRRL_PSN_M,
3025 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3026
3027 roce_set_field(qpc_mask->byte_240_irrl_tail,
3028 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3029 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3030
3031 roce_set_field(context->byte_220_retry_psn_msn,
3032 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3033 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
3034 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3035 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3036 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
3037
3038 roce_set_field(context->byte_224_retry_msg,
3039 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3040 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
3041 roce_set_field(qpc_mask->byte_224_retry_msg,
3042 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3043 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
3044
3045 roce_set_field(context->byte_224_retry_msg,
3046 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3047 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
3048 roce_set_field(qpc_mask->byte_224_retry_msg,
3049 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3050 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
3051
3052 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3053 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3054 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3055
3056 roce_set_bit(qpc_mask->byte_248_ack_psn,
3057 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3058
3059 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3060 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3061
3062 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3063 V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
3064 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3065 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
3066
3067 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3068 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
3069 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3070 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
3071
3072 roce_set_field(context->byte_244_rnr_rxack,
3073 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3074 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
3075 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3076 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3077 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
3078
3079 roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3080 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
3081 roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3082 V2_QPC_BYTE_244_RNR_CNT_S, 0);
3083
3084 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3085 V2_QPC_BYTE_212_LSN_S, 0x100);
3086 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3087 V2_QPC_BYTE_212_LSN_S, 0);
3088
3089 if (attr_mask & IB_QP_TIMEOUT) {
3090 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3091 V2_QPC_BYTE_28_AT_S, attr->timeout);
3092 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3093 V2_QPC_BYTE_28_AT_S, 0);
3094 }
3095
3096 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3097 V2_QPC_BYTE_28_SL_S,
3098 rdma_ah_get_sl(&attr->ah_attr));
3099 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3100 V2_QPC_BYTE_28_SL_S, 0);
3101 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3102
3103 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3104 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
3105 roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3106 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
3107
3108 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3109 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3110 roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3111 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
3112 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3113 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
3114
3115 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
3116 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
3117 V2_QPC_BYTE_208_SR_MAX_S,
3118 fls(attr->max_rd_atomic - 1));
3119 roce_set_field(qpc_mask->byte_208_irrl,
3120 V2_QPC_BYTE_208_SR_MAX_M,
3121 V2_QPC_BYTE_208_SR_MAX_S, 0);
3122 }
3123 return 0;
3124 }
3125
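/*
 * Top-level QP state transition. The context and its mask are
 * allocated as one pair; the mask starts as all ones (keep every
 * field) and each transition helper clears the bits of the fields it
 * programs before the pair is mailed to hardware. On a transition to
 * RESET of a kernel QP, the SQ/RQ state and CQEs are cleaned up.
 */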
3126 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
3127 const struct ib_qp_attr *attr,
3128 int attr_mask, enum ib_qp_state cur_state,
3129 enum ib_qp_state new_state)
3130 {
3131 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3132 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3133 struct hns_roce_v2_qp_context *context;
3134 struct hns_roce_v2_qp_context *qpc_mask;
3135 struct device *dev = hr_dev->dev;
3136 int ret = -EINVAL;
3137
3138 context = kzalloc(2 * sizeof(*context), GFP_KERNEL);
3139 if (!context)
3140 return -ENOMEM;
3141
3142 qpc_mask = context + 1;
3143 /*
3144 * In the v2 engine, software passes both the context and a context
3145 * mask to hardware when modifying a qp. To modify a field in the
3146 * context, all bits of that field in the context mask must be
3147 * cleared to 0; fields left unchanged keep their mask bits as 0x1.
3148 */
3149 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
3150 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3151 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
3152 qpc_mask);
3153 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3154 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
3155 qpc_mask);
3156 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3157 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
3158 qpc_mask);
3159 if (ret)
3160 goto out;
3161 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3162 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
3163 qpc_mask);
3164 if (ret)
3165 goto out;
3166 } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
3167 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
3168 (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
3169 (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
3170 (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
3171 (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3172 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3173 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3174 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3175 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3176 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3177 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3178 (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
3179 (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
3180 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
3181 /* Nothing */
3182 ;
3183 } else {
3184 dev_err(dev, "Illegal state for QP!\n");
3185 goto out;
3186 }
3187
3188 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
3189 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
3190
3191 /* Every state migration must update the QP state field */
3192 roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
3193 V2_QPC_BYTE_60_QP_ST_S, new_state);
3194 roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
3195 V2_QPC_BYTE_60_QP_ST_S, 0);
3196
3197 /* SW passes the context to HW */
3198 ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
3199 context, hr_qp);
3200 if (ret) {
3201 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
3202 goto out;
3203 }
3204
3205 hr_qp->state = new_state;
3206
3207 if (attr_mask & IB_QP_ACCESS_FLAGS)
3208 hr_qp->atomic_rd_en = attr->qp_access_flags;
3209
3210 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3211 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3212 if (attr_mask & IB_QP_PORT) {
3213 hr_qp->port = attr->port_num - 1;
3214 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3215 }
3216
3217 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3218 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3219 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3220 if (ibqp->send_cq != ibqp->recv_cq)
3221 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
3222 hr_qp->qpn, NULL);
3223
3224 hr_qp->rq.head = 0;
3225 hr_qp->rq.tail = 0;
3226 hr_qp->sq.head = 0;
3227 hr_qp->sq.tail = 0;
3228 hr_qp->sq_next_wqe = 0;
3229 hr_qp->next_sge = 0;
3230 if (hr_qp->rq.wqe_cnt)
3231 *hr_qp->rdb.db_record = 0;
3232 }
3233
3234 out:
3235 kfree(context);
3236 return ret;
3237 }
3238
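/* Map hardware QP states to IB states; unknown states map to -1. */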
3239 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
3240 {
3241 switch (state) {
3242 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
3243 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
3244 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
3245 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
3246 case HNS_ROCE_QP_ST_SQ_DRAINING:
3247 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
3248 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
3249 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
3250 default: return -1;
3251 }
3252 }
3253
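/* Read the raw QPC back from hardware via a QUERY_QPC mailbox command. */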
3254 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
3255 struct hns_roce_qp *hr_qp,
3256 struct hns_roce_v2_qp_context *hr_context)
3257 {
3258 struct hns_roce_cmd_mailbox *mailbox;
3259 int ret;
3260
3261 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3262 if (IS_ERR(mailbox))
3263 return PTR_ERR(mailbox);
3264
3265 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3266 HNS_ROCE_CMD_QUERY_QPC,
3267 HNS_ROCE_CMD_TIMEOUT_MSECS);
3268 if (ret) {
3269 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
3270 goto out;
3271 }
3272
3273 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3274
3275 out:
3276 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3277 return ret;
3278 }
3279
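/*
 * ib query_qp: fetch the QPC from hardware and translate it into
 * ib_qp_attr fields. A QP that software already holds in RESET is
 * answered without touching hardware.
 */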
3280 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3281 int qp_attr_mask,
3282 struct ib_qp_init_attr *qp_init_attr)
3283 {
3284 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3285 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3286 struct hns_roce_v2_qp_context *context;
3287 struct device *dev = hr_dev->dev;
3288 int tmp_qp_state;
3289 int state;
3290 int ret;
3291
3292 context = kzalloc(sizeof(*context), GFP_KERNEL);
3293 if (!context)
3294 return -ENOMEM;
3295
3296 memset(qp_attr, 0, sizeof(*qp_attr));
3297 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3298
3299 mutex_lock(&hr_qp->mutex);
3300
3301 if (hr_qp->state == IB_QPS_RESET) {
3302 qp_attr->qp_state = IB_QPS_RESET;
3303 ret = 0;
3304 goto done;
3305 }
3306
3307 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
3308 if (ret) {
3309 dev_err(dev, "query qpc error\n");
3310 ret = -EINVAL;
3311 goto out;
3312 }
3313
3314 state = roce_get_field(context->byte_60_qpst_mapid,
3315 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
3316 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
3317 if (tmp_qp_state == -1) {
3318 dev_err(dev, "Illegal ib_qp_state\n");
3319 ret = -EINVAL;
3320 goto out;
3321 }
3322 hr_qp->state = (u8)tmp_qp_state;
3323 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3324 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
3325 V2_QPC_BYTE_24_MTU_M,
3326 V2_QPC_BYTE_24_MTU_S);
3327 qp_attr->path_mig_state = IB_MIG_ARMED;
3328 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3329 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3330 qp_attr->qkey = V2_QKEY_VAL;
3331
3332 qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
3333 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3334 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
3335 qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
3336 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3337 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
3338 	qp_attr->dest_qp_num = roce_get_field(context->byte_56_dqpn_err,
3339 V2_QPC_BYTE_56_DQPN_M,
3340 V2_QPC_BYTE_56_DQPN_S);
3341 qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
3342 V2_QPC_BYTE_76_RRE_S)) << 2) |
3343 ((roce_get_bit(context->byte_76_srqn_op_en,
3344 V2_QPC_BYTE_76_RWE_S)) << 1) |
3345 ((roce_get_bit(context->byte_76_srqn_op_en,
3346 V2_QPC_BYTE_76_ATE_S)) << 3);
3347 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3348 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3349 struct ib_global_route *grh =
3350 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3351
3352 rdma_ah_set_sl(&qp_attr->ah_attr,
3353 roce_get_field(context->byte_28_at_fl,
3354 V2_QPC_BYTE_28_SL_M,
3355 V2_QPC_BYTE_28_SL_S));
3356 grh->flow_label = roce_get_field(context->byte_28_at_fl,
3357 V2_QPC_BYTE_28_FL_M,
3358 V2_QPC_BYTE_28_FL_S);
3359 grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
3360 V2_QPC_BYTE_20_SGID_IDX_M,
3361 V2_QPC_BYTE_20_SGID_IDX_S);
3362 grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
3363 V2_QPC_BYTE_24_HOP_LIMIT_M,
3364 V2_QPC_BYTE_24_HOP_LIMIT_S);
3365 grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
3366 V2_QPC_BYTE_24_TC_M,
3367 V2_QPC_BYTE_24_TC_S);
3368
3369 memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
3370 }
3371
3372 qp_attr->port_num = hr_qp->port + 1;
3373 qp_attr->sq_draining = 0;
3374 qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
3375 V2_QPC_BYTE_208_SR_MAX_M,
3376 V2_QPC_BYTE_208_SR_MAX_S);
3377 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
3378 V2_QPC_BYTE_140_RR_MAX_M,
3379 V2_QPC_BYTE_140_RR_MAX_S);
3380 qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
3381 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3382 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
3383 qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
3384 V2_QPC_BYTE_28_AT_M,
3385 V2_QPC_BYTE_28_AT_S);
3386 qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
3387 V2_QPC_BYTE_212_RETRY_CNT_M,
3388 V2_QPC_BYTE_212_RETRY_CNT_S);
3389 qp_attr->rnr_retry = context->rq_rnr_timer;
3390
3391 done:
3392 qp_attr->cur_qp_state = qp_attr->qp_state;
3393 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3394 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3395
3396 if (!ibqp->uobject) {
3397 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3398 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3399 } else {
3400 qp_attr->cap.max_send_wr = 0;
3401 qp_attr->cap.max_send_sge = 0;
3402 }
3403
3404 qp_init_attr->cap = qp_attr->cap;
3405
3406 out:
3407 mutex_unlock(&hr_qp->mutex);
3408 kfree(context);
3409 return ret;
3410 }
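
/*
 * Illustrative sketch (editor's note, not part of the driver): the QPC
 * stores the rd_atomic depths as log2 values, which is why the query path
 * above expands them with "1 << field".  A minimal round trip for a
 * power-of-two depth, using hypothetical helper names:
 */
static inline u32 __maybe_unused sr_max_field_to_depth(u32 sr_max)
{
	return 1U << sr_max;		/* e.g. field value 3 -> depth 8 */
}

static inline u32 __maybe_unused depth_to_sr_max_field(u32 depth)
{
	return ilog2(depth);		/* e.g. depth 8 -> field value 3 */
}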
3411
3412 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
3413 struct hns_roce_qp *hr_qp,
3414 int is_user)
3415 {
3416 struct hns_roce_cq *send_cq, *recv_cq;
3417 struct device *dev = hr_dev->dev;
3418 int ret;
3419
3420 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
3421 /* Modify qp to reset before destroying qp */
3422 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
3423 hr_qp->state, IB_QPS_RESET);
3424 if (ret) {
3425 			dev_err(dev, "modify QP %06lx to Reset failed.\n",
3426 hr_qp->qpn);
3427 return ret;
3428 }
3429 }
3430
3431 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3432 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3433
3434 hns_roce_lock_cqs(send_cq, recv_cq);
3435
3436 if (!is_user) {
3437 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3438 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3439 if (send_cq != recv_cq)
3440 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
3441 }
3442
3443 hns_roce_qp_remove(hr_dev, hr_qp);
3444
3445 hns_roce_unlock_cqs(send_cq, recv_cq);
3446
3447 hns_roce_qp_free(hr_dev, hr_qp);
3448
3449 	/* Not a special QP, so free its QPN */
3450 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
3451 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
3452 (hr_qp->ibqp.qp_type == IB_QPT_UD))
3453 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3454
3455 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3456
3457 if (is_user) {
3458 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
3459 hns_roce_db_unmap_user(
3460 to_hr_ucontext(hr_qp->ibqp.uobject->context),
3461 &hr_qp->rdb);
3462 ib_umem_release(hr_qp->umem);
3463 } else {
3464 kfree(hr_qp->sq.wrid);
3465 kfree(hr_qp->rq.wrid);
3466 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3467 if (hr_qp->rq.wqe_cnt)
3468 hns_roce_free_db(hr_dev, &hr_qp->rdb);
3469 }
3470
3471 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
3472 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
3473 kfree(hr_qp->rq_inl_buf.wqe_list);
3474 }
3475
3476 return 0;
3477 }
3478
3479 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
3480 {
3481 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3482 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3483 int ret;
3484
3485 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
3486 if (ret) {
3487 dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
3488 return ret;
3489 }
3490
3491 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3492 kfree(hr_to_hr_sqp(hr_qp));
3493 else
3494 kfree(hr_qp);
3495
3496 return 0;
3497 }
3498
3499 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
3500 {
3501 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
3502 struct hns_roce_v2_cq_context *cq_context;
3503 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
3504 struct hns_roce_v2_cq_context *cqc_mask;
3505 struct hns_roce_cmd_mailbox *mailbox;
3506 int ret;
3507
3508 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3509 if (IS_ERR(mailbox))
3510 return PTR_ERR(mailbox);
3511
3512 cq_context = mailbox->buf;
3513 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
3514
3515 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
3516
3517 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3518 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3519 cq_count);
3520 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3521 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3522 0);
3523 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3524 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3525 cq_period);
3526 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3527 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3528 0);
3529
3530 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
3531 HNS_ROCE_CMD_MODIFY_CQC,
3532 HNS_ROCE_CMD_TIMEOUT_MSECS);
3533 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3534 if (ret)
3535 		dev_err(hr_dev->dev, "MODIFY CQ cmd process error.\n");
3536
3537 return ret;
3538 }
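
/*
 * Illustrative sketch (editor's note, not part of the driver): a
 * MODIFY_CQC mailbox carries two back-to-back contexts -- the new values
 * followed by a mask.  The mask starts as all ones ("change nothing") and
 * each field that should take effect is cleared to 0, as done above for
 * CQ_MAX_CNT and CQ_PERIOD.  The hypothetical helper below captures the
 * pattern for one field:
 */
static void __maybe_unused set_cqc_masked_field(u32 *ctx_word, u32 *mask_word,
						u32 field_m, u32 field_s,
						u32 val)
{
	roce_set_field(*ctx_word, field_m, field_s, val); /* new value */
	roce_set_field(*mask_word, field_m, field_s, 0);  /* enable update */
}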
3539
3540 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
3541 {
3542 u32 doorbell[2];
3543
3544 doorbell[0] = 0;
3545 doorbell[1] = 0;
3546
3547 if (eq->type_flag == HNS_ROCE_AEQ) {
3548 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
3549 HNS_ROCE_V2_EQ_DB_CMD_S,
3550 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
3551 HNS_ROCE_EQ_DB_CMD_AEQ :
3552 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
3553 } else {
3554 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
3555 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
3556
3557 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
3558 HNS_ROCE_V2_EQ_DB_CMD_S,
3559 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
3560 HNS_ROCE_EQ_DB_CMD_CEQ :
3561 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
3562 }
3563
3564 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
3565 HNS_ROCE_V2_EQ_DB_PARA_S,
3566 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
3567
3568 hns_roce_write64_k(doorbell, eq->doorbell);
3569 }
3570
3571 static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3572 struct hns_roce_aeqe *aeqe,
3573 u32 qpn)
3574 {
3575 struct device *dev = hr_dev->dev;
3576 int sub_type;
3577
3578 dev_warn(dev, "Local work queue catastrophic error.\n");
3579 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
3580 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
3581 switch (sub_type) {
3582 case HNS_ROCE_LWQCE_QPC_ERROR:
3583 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3584 break;
3585 case HNS_ROCE_LWQCE_MTU_ERROR:
3586 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3587 break;
3588 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3589 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3590 break;
3591 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3592 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3593 break;
3594 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3595 dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
3596 break;
3597 default:
3598 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
3599 break;
3600 }
3601 }
3602
3603 static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3604 struct hns_roce_aeqe *aeqe, u32 qpn)
3605 {
3606 struct device *dev = hr_dev->dev;
3607 int sub_type;
3608
3609 dev_warn(dev, "Local access violation work queue error.\n");
3610 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
3611 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
3612 switch (sub_type) {
3613 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3614 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3615 break;
3616 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3617 dev_warn(dev, "QP %d, length error.\n", qpn);
3618 break;
3619 case HNS_ROCE_LAVWQE_VA_ERROR:
3620 dev_warn(dev, "QP %d, VA error.\n", qpn);
3621 break;
3622 case HNS_ROCE_LAVWQE_PD_ERROR:
3623 dev_err(dev, "QP %d, PD error.\n", qpn);
3624 break;
3625 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3626 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3627 break;
3628 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3629 dev_warn(dev, "QP %d, key state error.\n", qpn);
3630 break;
3631 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3632 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3633 break;
3634 default:
3635 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
3636 break;
3637 }
3638 }
3639
3640 static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
3641 struct hns_roce_aeqe *aeqe,
3642 int event_type)
3643 {
3644 struct device *dev = hr_dev->dev;
3645 u32 qpn;
3646
3647 qpn = roce_get_field(aeqe->event.qp_event.qp,
3648 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
3649 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
3650
3651 switch (event_type) {
3652 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3653 dev_warn(dev, "Communication established.\n");
3654 break;
3655 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3656 dev_warn(dev, "Send queue drained.\n");
3657 break;
3658 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3659 hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
3660 break;
3661 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3662 dev_warn(dev, "Invalid request local work queue error.\n");
3663 break;
3664 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3665 hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3666 break;
3667 default:
3668 break;
3669 }
3670
3671 hns_roce_qp_event(hr_dev, qpn, event_type);
3672 }
3673
3674 static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
3675 struct hns_roce_aeqe *aeqe,
3676 int event_type)
3677 {
3678 struct device *dev = hr_dev->dev;
3679 u32 cqn;
3680
3681 cqn = roce_get_field(aeqe->event.cq_event.cq,
3682 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
3683 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
3684
3685 switch (event_type) {
3686 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3687 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3688 break;
3689 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3690 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3691 break;
3692 default:
3693 break;
3694 }
3695
3696 hns_roce_cq_event(hr_dev, cqn, event_type);
3697 }
3698
3699 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
3700 {
3701 u32 buf_chk_sz;
3702 unsigned long off;
3703
3704 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3705 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
3706
3707 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
3708 off % buf_chk_sz);
3709 }
3710
3711 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
3712 {
3713 u32 buf_chk_sz;
3714 unsigned long off;
3715
3716 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3717
3718 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
3719
3720 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
3721 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
3722 off % buf_chk_sz);
3723 else
3724 return (struct hns_roce_aeqe *)((u8 *)
3725 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
3726 }
3727
3728 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
3729 {
3730 struct hns_roce_aeqe *aeqe;
3731
3732 if (!eq->hop_num)
3733 aeqe = get_aeqe_v2(eq, eq->cons_index);
3734 else
3735 aeqe = mhop_get_aeqe(eq, eq->cons_index);
3736
3737 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
3738 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
3739 }
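
/*
 * Illustrative sketch (editor's note, not part of the driver): EQE
 * validity is tracked with an owner (phase) bit.  cons_index runs over a
 * double-sized window [0, 2 * entries) before wrapping (see the overflow
 * checks in the interrupt handlers below), so "cons_index & entries"
 * gives the software pass parity; hardware toggles the owner bit each
 * time it laps the ring.  An entry is ready to consume exactly when the
 * two disagree, which is the XOR computed above:
 */
static bool __maybe_unused eqe_is_new(u32 owner_bit, u32 cons_index,
				      u32 entries)
{
	/* entries is a power of two, so this isolates the "lap" bit */
	bool sw_pass_is_odd = cons_index & entries;

	return !!owner_bit ^ sw_pass_is_odd;
}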
3740
3741 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
3742 struct hns_roce_eq *eq)
3743 {
3744 struct device *dev = hr_dev->dev;
3745 struct hns_roce_aeqe *aeqe;
3746 int aeqe_found = 0;
3747 int event_type;
3748
3749 while ((aeqe = next_aeqe_sw_v2(eq))) {
3750
3751 /* Make sure we read AEQ entry after we have checked the
3752 * ownership bit
3753 */
3754 dma_rmb();
3755
3756 event_type = roce_get_field(aeqe->asyn,
3757 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
3758 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
3759
3760 switch (event_type) {
3761 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3762 dev_warn(dev, "Path migrated succeeded.\n");
3763 break;
3764 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3765 dev_warn(dev, "Path migration failed.\n");
3766 break;
3767 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3768 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3769 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3770 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3771 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3772 hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
3773 break;
3774 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3775 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3776 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3777 dev_warn(dev, "SRQ not support.\n");
3778 break;
3779 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3780 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3781 hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
3782 break;
3783 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3784 dev_warn(dev, "DB overflow.\n");
3785 break;
3786 case HNS_ROCE_EVENT_TYPE_MB:
3787 hns_roce_cmd_event(hr_dev,
3788 le16_to_cpu(aeqe->event.cmd.token),
3789 aeqe->event.cmd.status,
3790 le64_to_cpu(aeqe->event.cmd.out_param));
3791 break;
3792 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3793 dev_warn(dev, "CEQ overflow.\n");
3794 break;
3795 case HNS_ROCE_EVENT_TYPE_FLR:
3796 dev_warn(dev, "Function level reset.\n");
3797 break;
3798 default:
3799 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3800 event_type, eq->eqn, eq->cons_index);
3801 break;
3802 		}
3803
3804 ++eq->cons_index;
3805 aeqe_found = 1;
3806
3807 if (eq->cons_index > (2 * eq->entries - 1)) {
3808 dev_warn(dev, "cons_index overflow, set back to 0.\n");
3809 eq->cons_index = 0;
3810 }
3811 }
3812
3813 set_eq_cons_index_v2(eq);
3814 return aeqe_found;
3815 }
3816
3817 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
3818 {
3819 u32 buf_chk_sz;
3820 unsigned long off;
3821
3822 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3823 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
3824
3825 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
3826 off % buf_chk_sz);
3827 }
3828
3829 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
3830 {
3831 u32 buf_chk_sz;
3832 unsigned long off;
3833
3834 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3835
3836 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
3837
3838 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
3839 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
3840 off % buf_chk_sz);
3841 else
3842 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
3843 buf_chk_sz]) + off % buf_chk_sz);
3844 }
3845
3846 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
3847 {
3848 struct hns_roce_ceqe *ceqe;
3849
3850 if (!eq->hop_num)
3851 ceqe = get_ceqe_v2(eq, eq->cons_index);
3852 else
3853 ceqe = mhop_get_ceqe(eq, eq->cons_index);
3854
3855 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
3856 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3857 }
3858
3859 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
3860 struct hns_roce_eq *eq)
3861 {
3862 struct device *dev = hr_dev->dev;
3863 struct hns_roce_ceqe *ceqe;
3864 int ceqe_found = 0;
3865 u32 cqn;
3866
3867 while ((ceqe = next_ceqe_sw_v2(eq))) {
3868
3869 /* Make sure we read CEQ entry after we have checked the
3870 * ownership bit
3871 */
3872 dma_rmb();
3873
3874 cqn = roce_get_field(ceqe->comp,
3875 HNS_ROCE_V2_CEQE_COMP_CQN_M,
3876 HNS_ROCE_V2_CEQE_COMP_CQN_S);
3877
3878 hns_roce_cq_completion(hr_dev, cqn);
3879
3880 ++eq->cons_index;
3881 ceqe_found = 1;
3882
3883 if (eq->cons_index > (2 * eq->entries - 1)) {
3884 dev_warn(dev, "cons_index overflow, set back to 0.\n");
3885 eq->cons_index = 0;
3886 }
3887 }
3888
3889 set_eq_cons_index_v2(eq);
3890
3891 return ceqe_found;
3892 }
3893
3894 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
3895 {
3896 struct hns_roce_eq *eq = eq_ptr;
3897 struct hns_roce_dev *hr_dev = eq->hr_dev;
3898 int int_work = 0;
3899
3900 if (eq->type_flag == HNS_ROCE_CEQ)
3901 /* Completion event interrupt */
3902 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
3903 else
3904 		/* Asynchronous event interrupt */
3905 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
3906
3907 return IRQ_RETVAL(int_work);
3908 }
3909
3910 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
3911 {
3912 struct hns_roce_dev *hr_dev = dev_id;
3913 struct device *dev = hr_dev->dev;
3914 int int_work = 0;
3915 u32 int_st;
3916 u32 int_en;
3917
3918 /* Abnormal interrupt */
3919 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
3920 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
3921
3922 if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
3923 dev_err(dev, "AEQ overflow!\n");
3924
3925 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
3926 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
3927
3928 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
3929 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
3930
3931 int_work = 1;
3932 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
3933 dev_err(dev, "BUS ERR!\n");
3934
3935 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
3936 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
3937
3938 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
3939 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
3940
3941 int_work = 1;
3942 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
3943 dev_err(dev, "OTHER ERR!\n");
3944
3945 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
3946 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
3947
3948 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
3949 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
3950
3951 int_work = 1;
3952 } else
3953 		dev_err(dev, "No abnormal irq found!\n");
3954
3955 return IRQ_RETVAL(int_work);
3956 }
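
/*
 * Illustrative sketch (editor's note, not part of the driver): the
 * abnormal interrupt status register is acknowledged write-1-to-clear
 * style -- the handler above re-asserts exactly the bit it observed,
 * writes the word back, and then re-enables the abnormal interrupt.
 * The acknowledge step, factored into a hypothetical helper:
 */
static void __maybe_unused ack_abn_int_bit(struct hns_roce_dev *hr_dev,
					   int bit)
{
	u32 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);

	roce_set_bit(int_st, bit, 1);	/* writing 1 clears this bit */
	roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
}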
3957
3958 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
3959 int eq_num, int enable_flag)
3960 {
3961 int i;
3962
3963 if (enable_flag == EQ_ENABLE) {
3964 for (i = 0; i < eq_num; i++)
3965 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
3966 i * EQ_REG_OFFSET,
3967 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
3968
3969 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
3970 HNS_ROCE_V2_VF_ABN_INT_EN_M);
3971 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
3972 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
3973 } else {
3974 for (i = 0; i < eq_num; i++)
3975 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
3976 i * EQ_REG_OFFSET,
3977 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
3978
3979 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
3980 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
3981 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
3982 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
3983 }
3984 }
3985
3986 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
3987 {
3988 struct device *dev = hr_dev->dev;
3989 int ret;
3990
3991 if (eqn < hr_dev->caps.num_comp_vectors)
3992 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
3993 0, HNS_ROCE_CMD_DESTROY_CEQC,
3994 HNS_ROCE_CMD_TIMEOUT_MSECS);
3995 else
3996 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
3997 0, HNS_ROCE_CMD_DESTROY_AEQC,
3998 HNS_ROCE_CMD_TIMEOUT_MSECS);
3999 if (ret)
4000 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
4001 }
4002
4003 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
4004 struct hns_roce_eq *eq)
4005 {
4006 struct device *dev = hr_dev->dev;
4007 u64 idx;
4008 u64 size;
4009 u32 buf_chk_sz;
4010 u32 bt_chk_sz;
4011 u32 mhop_num;
4012 int eqe_alloc;
4013 int ba_num;
4014 int i = 0;
4015 int j = 0;
4016
4017 mhop_num = hr_dev->caps.eqe_hop_num;
4018 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4019 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4020 ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
4021 buf_chk_sz;
4022
4023 /* hop_num = 0 */
4024 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4025 dma_free_coherent(dev, (unsigned int)(eq->entries *
4026 eq->eqe_size), eq->bt_l0, eq->l0_dma);
4027 return;
4028 }
4029
4030 /* hop_num = 1 or hop = 2 */
4031 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4032 if (mhop_num == 1) {
4033 for (i = 0; i < eq->l0_last_num; i++) {
4034 if (i == eq->l0_last_num - 1) {
4035 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4036 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4037 dma_free_coherent(dev, size, eq->buf[i],
4038 eq->buf_dma[i]);
4039 break;
4040 }
4041 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4042 eq->buf_dma[i]);
4043 }
4044 } else if (mhop_num == 2) {
4045 for (i = 0; i < eq->l0_last_num; i++) {
4046 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4047 eq->l1_dma[i]);
4048
4049 for (j = 0; j < bt_chk_sz / 8; j++) {
4050 idx = i * (bt_chk_sz / 8) + j;
4051 if ((i == eq->l0_last_num - 1)
4052 && j == eq->l1_last_num - 1) {
4053 eqe_alloc = (buf_chk_sz / eq->eqe_size)
4054 * idx;
4055 size = (eq->entries - eqe_alloc)
4056 * eq->eqe_size;
4057 dma_free_coherent(dev, size,
4058 eq->buf[idx],
4059 eq->buf_dma[idx]);
4060 break;
4061 }
4062 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4063 eq->buf_dma[idx]);
4064 }
4065 }
4066 }
4067 kfree(eq->buf_dma);
4068 kfree(eq->buf);
4069 kfree(eq->l1_dma);
4070 kfree(eq->bt_l1);
4071 eq->buf_dma = NULL;
4072 eq->buf = NULL;
4073 eq->l1_dma = NULL;
4074 eq->bt_l1 = NULL;
4075 }
4076
4077 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
4078 struct hns_roce_eq *eq)
4079 {
4080 u32 buf_chk_sz;
4081
4082 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4083
4084 if (hr_dev->caps.eqe_hop_num) {
4085 hns_roce_mhop_free_eq(hr_dev, eq);
4086 return;
4087 }
4088
4089 if (eq->buf_list)
4090 dma_free_coherent(hr_dev->dev, buf_chk_sz,
4091 eq->buf_list->buf, eq->buf_list->map);
4092 }
4093
4094 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
4095 struct hns_roce_eq *eq,
4096 void *mb_buf)
4097 {
4098 struct hns_roce_eq_context *eqc;
4099
4100 eqc = mb_buf;
4101 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
4102
4103 /* init eqc */
4104 eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
4105 eq->hop_num = hr_dev->caps.eqe_hop_num;
4106 eq->cons_index = 0;
4107 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
4108 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
4109 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
4110 eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
4111 eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
4112 eq->shift = ilog2((unsigned int)eq->entries);
4113
4114 if (!eq->hop_num)
4115 eq->eqe_ba = eq->buf_list->map;
4116 else
4117 eq->eqe_ba = eq->l0_dma;
4118
4119 /* set eqc state */
4120 roce_set_field(eqc->byte_4,
4121 HNS_ROCE_EQC_EQ_ST_M,
4122 HNS_ROCE_EQC_EQ_ST_S,
4123 HNS_ROCE_V2_EQ_STATE_VALID);
4124
4125 /* set eqe hop num */
4126 roce_set_field(eqc->byte_4,
4127 HNS_ROCE_EQC_HOP_NUM_M,
4128 HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
4129
4130 /* set eqc over_ignore */
4131 roce_set_field(eqc->byte_4,
4132 HNS_ROCE_EQC_OVER_IGNORE_M,
4133 HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
4134
4135 /* set eqc coalesce */
4136 roce_set_field(eqc->byte_4,
4137 HNS_ROCE_EQC_COALESCE_M,
4138 HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
4139
4140 /* set eqc arm_state */
4141 roce_set_field(eqc->byte_4,
4142 HNS_ROCE_EQC_ARM_ST_M,
4143 HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
4144
4145 /* set eqn */
4146 roce_set_field(eqc->byte_4,
4147 HNS_ROCE_EQC_EQN_M,
4148 HNS_ROCE_EQC_EQN_S, eq->eqn);
4149
4150 /* set eqe_cnt */
4151 roce_set_field(eqc->byte_4,
4152 HNS_ROCE_EQC_EQE_CNT_M,
4153 HNS_ROCE_EQC_EQE_CNT_S,
4154 HNS_ROCE_EQ_INIT_EQE_CNT);
4155
4156 /* set eqe_ba_pg_sz */
4157 roce_set_field(eqc->byte_8,
4158 HNS_ROCE_EQC_BA_PG_SZ_M,
4159 HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);
4160
4161 /* set eqe_buf_pg_sz */
4162 roce_set_field(eqc->byte_8,
4163 HNS_ROCE_EQC_BUF_PG_SZ_M,
4164 HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);
4165
4166 /* set eq_producer_idx */
4167 roce_set_field(eqc->byte_8,
4168 HNS_ROCE_EQC_PROD_INDX_M,
4169 HNS_ROCE_EQC_PROD_INDX_S,
4170 HNS_ROCE_EQ_INIT_PROD_IDX);
4171
4172 /* set eq_max_cnt */
4173 roce_set_field(eqc->byte_12,
4174 HNS_ROCE_EQC_MAX_CNT_M,
4175 HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
4176
4177 /* set eq_period */
4178 roce_set_field(eqc->byte_12,
4179 HNS_ROCE_EQC_PERIOD_M,
4180 HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
4181
4182 /* set eqe_report_timer */
4183 roce_set_field(eqc->eqe_report_timer,
4184 HNS_ROCE_EQC_REPORT_TIMER_M,
4185 HNS_ROCE_EQC_REPORT_TIMER_S,
4186 HNS_ROCE_EQ_INIT_REPORT_TIMER);
4187
4188 /* set eqe_ba [34:3] */
4189 roce_set_field(eqc->eqe_ba0,
4190 HNS_ROCE_EQC_EQE_BA_L_M,
4191 HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
4192
4193 	/* set eqe_ba [63:35] */
4194 roce_set_field(eqc->eqe_ba1,
4195 HNS_ROCE_EQC_EQE_BA_H_M,
4196 HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
4197
4198 /* set eq shift */
4199 roce_set_field(eqc->byte_28,
4200 HNS_ROCE_EQC_SHIFT_M,
4201 HNS_ROCE_EQC_SHIFT_S, eq->shift);
4202
4203 /* set eq MSI_IDX */
4204 roce_set_field(eqc->byte_28,
4205 HNS_ROCE_EQC_MSI_INDX_M,
4206 HNS_ROCE_EQC_MSI_INDX_S,
4207 HNS_ROCE_EQ_INIT_MSI_IDX);
4208
4209 /* set cur_eqe_ba [27:12] */
4210 roce_set_field(eqc->byte_28,
4211 HNS_ROCE_EQC_CUR_EQE_BA_L_M,
4212 HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
4213
4214 /* set cur_eqe_ba [59:28] */
4215 roce_set_field(eqc->byte_32,
4216 HNS_ROCE_EQC_CUR_EQE_BA_M_M,
4217 HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
4218
4219 /* set cur_eqe_ba [63:60] */
4220 roce_set_field(eqc->byte_36,
4221 HNS_ROCE_EQC_CUR_EQE_BA_H_M,
4222 HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
4223
4224 /* set eq consumer idx */
4225 roce_set_field(eqc->byte_36,
4226 HNS_ROCE_EQC_CONS_INDX_M,
4227 HNS_ROCE_EQC_CONS_INDX_S,
4228 HNS_ROCE_EQ_INIT_CONS_IDX);
4229
4230 	/* set nxt_eqe_ba [43:12] */
4231 roce_set_field(eqc->nxt_eqe_ba0,
4232 HNS_ROCE_EQC_NXT_EQE_BA_L_M,
4233 HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
4234
4235 	/* set nxt_eqe_ba [63:44] */
4236 roce_set_field(eqc->nxt_eqe_ba1,
4237 HNS_ROCE_EQC_NXT_EQE_BA_H_M,
4238 HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
4239 }
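
/*
 * Illustrative sketch (editor's note, not part of the driver): the EQ
 * context packs 64-bit DMA addresses into several narrow fields, so the
 * code above hands roce_set_field() pre-shifted slices: eqe_ba bits
 * [34:3] and [63:35]; cur_eqe_ba bits [27:12], [59:28] and [63:60].
 * The shift selects the low edge of each slice and the field mask trims
 * the top, e.g. for eqe_ba:
 */
static void __maybe_unused split_eqe_ba(u64 eqe_ba, u32 *ba_l, u32 *ba_h)
{
	*ba_l = (u32)(eqe_ba >> 3);	/* bits [34:3]; mask trims the rest */
	*ba_h = (u32)(eqe_ba >> 35);	/* bits [63:35] */
}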
4240
4241 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
4242 struct hns_roce_eq *eq)
4243 {
4244 struct device *dev = hr_dev->dev;
4245 int eq_alloc_done = 0;
4246 int eq_buf_cnt = 0;
4247 int eqe_alloc;
4248 u32 buf_chk_sz;
4249 u32 bt_chk_sz;
4250 u32 mhop_num;
4251 u64 size;
4252 u64 idx;
4253 int ba_num;
4254 int bt_num;
4255 int record_i;
4256 int record_j;
4257 int i = 0;
4258 int j = 0;
4259
4260 mhop_num = hr_dev->caps.eqe_hop_num;
4261 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4262 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4263
4264 ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
4265 / buf_chk_sz;
4266 bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
4267
4268 /* hop_num = 0 */
4269 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4270 if (eq->entries > buf_chk_sz / eq->eqe_size) {
4271 dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
4272 eq->entries);
4273 return -EINVAL;
4274 }
4275 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
4276 &(eq->l0_dma), GFP_KERNEL);
4277 if (!eq->bt_l0)
4278 return -ENOMEM;
4279
4280 eq->cur_eqe_ba = eq->l0_dma;
4281 eq->nxt_eqe_ba = 0;
4282
4283 memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
4284
4285 return 0;
4286 }
4287
4288 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
4289 if (!eq->buf_dma)
4290 return -ENOMEM;
4291 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
4292 if (!eq->buf)
4293 goto err_kcalloc_buf;
4294
4295 if (mhop_num == 2) {
4296 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
4297 if (!eq->l1_dma)
4298 goto err_kcalloc_l1_dma;
4299
4300 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
4301 if (!eq->bt_l1)
4302 goto err_kcalloc_bt_l1;
4303 }
4304
4305 /* alloc L0 BT */
4306 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
4307 if (!eq->bt_l0)
4308 goto err_dma_alloc_l0;
4309
4310 if (mhop_num == 1) {
4311 		if (ba_num > (bt_chk_sz / 8)) {
4312 			dev_err(dev, "ba_num %d is too large for 1 hop\n", ba_num);
4313 			goto err_dma_alloc_buf;
4314 		}
4314
4315 /* alloc buf */
4316 for (i = 0; i < bt_chk_sz / 8; i++) {
4317 if (eq_buf_cnt + 1 < ba_num) {
4318 size = buf_chk_sz;
4319 } else {
4320 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4321 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4322 }
4323 eq->buf[i] = dma_alloc_coherent(dev, size,
4324 &(eq->buf_dma[i]),
4325 GFP_KERNEL);
4326 if (!eq->buf[i])
4327 goto err_dma_alloc_buf;
4328
4329 memset(eq->buf[i], 0, size);
4330 *(eq->bt_l0 + i) = eq->buf_dma[i];
4331
4332 eq_buf_cnt++;
4333 if (eq_buf_cnt >= ba_num)
4334 break;
4335 }
4336 eq->cur_eqe_ba = eq->buf_dma[0];
4337 eq->nxt_eqe_ba = eq->buf_dma[1];
4338
4339 } else if (mhop_num == 2) {
4340 /* alloc L1 BT and buf */
4341 for (i = 0; i < bt_chk_sz / 8; i++) {
4342 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
4343 &(eq->l1_dma[i]),
4344 GFP_KERNEL);
4345 if (!eq->bt_l1[i])
4346 goto err_dma_alloc_l1;
4347 *(eq->bt_l0 + i) = eq->l1_dma[i];
4348
4349 for (j = 0; j < bt_chk_sz / 8; j++) {
4350 idx = i * bt_chk_sz / 8 + j;
4351 if (eq_buf_cnt + 1 < ba_num) {
4352 size = buf_chk_sz;
4353 } else {
4354 eqe_alloc = (buf_chk_sz / eq->eqe_size)
4355 * idx;
4356 size = (eq->entries - eqe_alloc)
4357 * eq->eqe_size;
4358 }
4359 eq->buf[idx] = dma_alloc_coherent(dev, size,
4360 &(eq->buf_dma[idx]),
4361 GFP_KERNEL);
4362 if (!eq->buf[idx])
4363 goto err_dma_alloc_buf;
4364
4365 memset(eq->buf[idx], 0, size);
4366 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
4367
4368 eq_buf_cnt++;
4369 if (eq_buf_cnt >= ba_num) {
4370 eq_alloc_done = 1;
4371 break;
4372 }
4373 }
4374
4375 if (eq_alloc_done)
4376 break;
4377 }
4378 eq->cur_eqe_ba = eq->buf_dma[0];
4379 eq->nxt_eqe_ba = eq->buf_dma[1];
4380 }
4381
4382 eq->l0_last_num = i + 1;
4383 if (mhop_num == 2)
4384 eq->l1_last_num = j + 1;
4385
4386 return 0;
4387
4388 err_dma_alloc_l1:
4389 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4390 eq->bt_l0 = NULL;
4391 eq->l0_dma = 0;
4392 for (i -= 1; i >= 0; i--) {
4393 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4394 eq->l1_dma[i]);
4395
4396 for (j = 0; j < bt_chk_sz / 8; j++) {
4397 idx = i * bt_chk_sz / 8 + j;
4398 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4399 eq->buf_dma[idx]);
4400 }
4401 }
4402 goto err_dma_alloc_l0;
4403
4404 err_dma_alloc_buf:
4405 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4406 eq->bt_l0 = NULL;
4407 eq->l0_dma = 0;
4408
4409 if (mhop_num == 1)
4410 for (i -= 1; i >= 0; i--)
4411 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4412 eq->buf_dma[i]);
4413 else if (mhop_num == 2) {
4414 record_i = i;
4415 record_j = j;
4416 for (; i >= 0; i--) {
4417 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4418 eq->l1_dma[i]);
4419
4420 for (j = 0; j < bt_chk_sz / 8; j++) {
4421 if (i == record_i && j >= record_j)
4422 break;
4423
4424 idx = i * bt_chk_sz / 8 + j;
4425 dma_free_coherent(dev, buf_chk_sz,
4426 eq->buf[idx],
4427 eq->buf_dma[idx]);
4428 }
4429 }
4430 }
4431
4432 err_dma_alloc_l0:
4433 kfree(eq->bt_l1);
4434 eq->bt_l1 = NULL;
4435
4436 err_kcalloc_bt_l1:
4437 kfree(eq->l1_dma);
4438 eq->l1_dma = NULL;
4439
4440 err_kcalloc_l1_dma:
4441 kfree(eq->buf);
4442 eq->buf = NULL;
4443
4444 err_kcalloc_buf:
4445 kfree(eq->buf_dma);
4446 eq->buf_dma = NULL;
4447
4448 return -ENOMEM;
4449 }
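
/*
 * Editor's note -- worked example (not part of the driver): with
 * multi-hop EQs the buffer is split into ba_num chunks of buf_chk_sz
 * bytes, tracked by base-address tables holding bt_chk_sz / 8 64-bit
 * entries each.  Assuming 4 KB pages and eqe_buf_pg_sz = eqe_ba_pg_sz
 * = 0, an AEQ of 4096 16-byte entries works out to:
 *
 *	buf_chk_sz = 4096, bt_chk_sz = 4096
 *	ba_num = DIV_ROUND_UP(4096 * 16, 4096) = 16 buffer chunks
 *	bt_num = DIV_ROUND_UP(16, 4096 / 8)    = 1 table, i.e. one hop
 *
 * and EQE n then lives at buf[n * 16 / 4096] + (n * 16) % 4096,
 * matching mhop_get_aeqe() above.
 */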
4450
4451 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
4452 struct hns_roce_eq *eq,
4453 unsigned int eq_cmd)
4454 {
4455 struct device *dev = hr_dev->dev;
4456 struct hns_roce_cmd_mailbox *mailbox;
4457 u32 buf_chk_sz = 0;
4458 int ret;
4459
4460 /* Allocate mailbox memory */
4461 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4462 if (IS_ERR(mailbox))
4463 return PTR_ERR(mailbox);
4464
4465 if (!hr_dev->caps.eqe_hop_num) {
4466 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4467
4468 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
4469 GFP_KERNEL);
4470 if (!eq->buf_list) {
4471 ret = -ENOMEM;
4472 goto free_cmd_mbox;
4473 }
4474
4475 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
4476 &(eq->buf_list->map),
4477 GFP_KERNEL);
4478 if (!eq->buf_list->buf) {
4479 ret = -ENOMEM;
4480 goto err_alloc_buf;
4481 }
4482
4483 memset(eq->buf_list->buf, 0, buf_chk_sz);
4484 } else {
4485 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
4486 if (ret) {
4488 goto free_cmd_mbox;
4489 }
4490 }
4491
4492 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
4493
4494 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
4495 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
4496 if (ret) {
4497 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
4498 goto err_cmd_mbox;
4499 }
4500
4501 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4502
4503 return 0;
4504
4505 err_cmd_mbox:
4506 if (!hr_dev->caps.eqe_hop_num)
4507 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
4508 eq->buf_list->map);
4509 else {
4510 hns_roce_mhop_free_eq(hr_dev, eq);
4511 goto free_cmd_mbox;
4512 }
4513
4514 err_alloc_buf:
4515 kfree(eq->buf_list);
4516
4517 free_cmd_mbox:
4518 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4519
4520 return ret;
4521 }
4522
4523 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
4524 {
4525 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4526 struct device *dev = hr_dev->dev;
4527 struct hns_roce_eq *eq;
4528 unsigned int eq_cmd;
4529 int irq_num;
4530 int eq_num;
4531 int other_num;
4532 int comp_num;
4533 int aeq_num;
4534 int i, j, k;
4535 int ret;
4536
4537 other_num = hr_dev->caps.num_other_vectors;
4538 comp_num = hr_dev->caps.num_comp_vectors;
4539 aeq_num = hr_dev->caps.num_aeq_vectors;
4540
4541 eq_num = comp_num + aeq_num;
4542 irq_num = eq_num + other_num;
4543
4544 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4545 if (!eq_table->eq)
4546 return -ENOMEM;
4547
4548 for (i = 0; i < irq_num; i++) {
4549 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
4550 GFP_KERNEL);
4551 if (!hr_dev->irq_names[i]) {
4552 ret = -ENOMEM;
4553 goto err_failed_kzalloc;
4554 }
4555 }
4556
4557 /* create eq */
4558 for (j = 0; j < eq_num; j++) {
4559 eq = &eq_table->eq[j];
4560 eq->hr_dev = hr_dev;
4561 eq->eqn = j;
4562 if (j < comp_num) {
4563 /* CEQ */
4564 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
4565 eq->type_flag = HNS_ROCE_CEQ;
4566 eq->entries = hr_dev->caps.ceqe_depth;
4567 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
4568 eq->irq = hr_dev->irq[j + other_num + aeq_num];
4569 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
4570 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
4571 } else {
4572 /* AEQ */
4573 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
4574 eq->type_flag = HNS_ROCE_AEQ;
4575 eq->entries = hr_dev->caps.aeqe_depth;
4576 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
4577 eq->irq = hr_dev->irq[j - comp_num + other_num];
4578 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
4579 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
4580 }
4581
4582 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
4583 if (ret) {
4584 dev_err(dev, "eq create failed.\n");
4585 goto err_create_eq_fail;
4586 }
4587 }
4588
4589 /* enable irq */
4590 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
4591
4592 	/* irq contains: abnormal + AEQ + CEQ */
4593 for (k = 0; k < irq_num; k++)
4594 if (k < other_num)
4595 snprintf((char *)hr_dev->irq_names[k],
4596 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
4597 else if (k < (other_num + aeq_num))
4598 snprintf((char *)hr_dev->irq_names[k],
4599 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
4600 k - other_num);
4601 else
4602 snprintf((char *)hr_dev->irq_names[k],
4603 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
4604 k - other_num - aeq_num);
4605
4606 for (k = 0; k < irq_num; k++) {
4607 if (k < other_num)
4608 ret = request_irq(hr_dev->irq[k],
4609 hns_roce_v2_msix_interrupt_abn,
4610 0, hr_dev->irq_names[k], hr_dev);
4611
4612 else if (k < (other_num + comp_num))
4613 ret = request_irq(eq_table->eq[k - other_num].irq,
4614 hns_roce_v2_msix_interrupt_eq,
4615 0, hr_dev->irq_names[k + aeq_num],
4616 &eq_table->eq[k - other_num]);
4617 else
4618 ret = request_irq(eq_table->eq[k - other_num].irq,
4619 hns_roce_v2_msix_interrupt_eq,
4620 0, hr_dev->irq_names[k - comp_num],
4621 &eq_table->eq[k - other_num]);
4622 if (ret) {
4623 dev_err(dev, "Request irq error!\n");
4624 goto err_request_irq_fail;
4625 }
4626 }
4627
4628 return 0;
4629
4630 err_request_irq_fail:
4631 for (k -= 1; k >= 0; k--)
4632 if (k < other_num)
4633 free_irq(hr_dev->irq[k], hr_dev);
4634 else
4635 free_irq(eq_table->eq[k - other_num].irq,
4636 &eq_table->eq[k - other_num]);
4637
4638 err_create_eq_fail:
4639 for (j -= 1; j >= 0; j--)
4640 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
4641
4642 err_failed_kzalloc:
4643 for (i -= 1; i >= 0; i--)
4644 kfree(hr_dev->irq_names[i]);
4645 kfree(eq_table->eq);
4646
4647 return ret;
4648 }
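
/*
 * Illustrative sketch (editor's note, not part of the driver): the MSI-X
 * vectors handed over by the NIC driver are laid out
 * [abnormal | AEQ | CEQ], while eq_table->eq[] holds CEQs first and then
 * AEQs.  That is why the creation loop above picks the vector with two
 * different formulas, summarized by this hypothetical helper:
 */
static int __maybe_unused eq_index_to_vector(int j, int other_num,
					     int aeq_num, int comp_num)
{
	if (j < comp_num)			/* CEQ j */
		return j + other_num + aeq_num;

	return (j - comp_num) + other_num;	/* AEQ (j - comp_num) */
}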
4649
4650 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4651 {
4652 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4653 int irq_num;
4654 int eq_num;
4655 int i;
4656
4657 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4658 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4659
4660 /* Disable irq */
4661 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
4662
4663 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
4664 free_irq(hr_dev->irq[i], hr_dev);
4665
4666 for (i = 0; i < eq_num; i++) {
4667 hns_roce_v2_destroy_eqc(hr_dev, i);
4668
4669 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
4670
4671 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
4672 }
4673
4674 for (i = 0; i < irq_num; i++)
4675 kfree(hr_dev->irq_names[i]);
4676
4677 kfree(eq_table->eq);
4678 }
4679
4680 static const struct hns_roce_hw hns_roce_hw_v2 = {
4681 .cmq_init = hns_roce_v2_cmq_init,
4682 .cmq_exit = hns_roce_v2_cmq_exit,
4683 .hw_profile = hns_roce_v2_profile,
4684 .post_mbox = hns_roce_v2_post_mbox,
4685 .chk_mbox = hns_roce_v2_chk_mbox,
4686 .set_gid = hns_roce_v2_set_gid,
4687 .set_mac = hns_roce_v2_set_mac,
4688 .write_mtpt = hns_roce_v2_write_mtpt,
4689 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
4690 .write_cqc = hns_roce_v2_write_cqc,
4691 .set_hem = hns_roce_v2_set_hem,
4692 .clear_hem = hns_roce_v2_clear_hem,
4693 .modify_qp = hns_roce_v2_modify_qp,
4694 .query_qp = hns_roce_v2_query_qp,
4695 .destroy_qp = hns_roce_v2_destroy_qp,
4696 .modify_cq = hns_roce_v2_modify_cq,
4697 .post_send = hns_roce_v2_post_send,
4698 .post_recv = hns_roce_v2_post_recv,
4699 .req_notify_cq = hns_roce_v2_req_notify_cq,
4700 .poll_cq = hns_roce_v2_poll_cq,
4701 .init_eq = hns_roce_v2_init_eq_table,
4702 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
4703 };
4704
4705 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
4706 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
4707 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
4708 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
4709 /* required last entry */
4710 {0, }
4711 };
4712
4713 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
4714
4715 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
4716 struct hnae3_handle *handle)
4717 {
4718 const struct pci_device_id *id;
4719 int i;
4720
4721 id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
4722 if (!id) {
4723 dev_err(hr_dev->dev, "device is not compatible!\n");
4724 return -ENXIO;
4725 }
4726
4727 hr_dev->hw = &hns_roce_hw_v2;
4728 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4729 hr_dev->odb_offset = hr_dev->sdb_offset;
4730
4731 /* Get info from NIC driver. */
4732 hr_dev->reg_base = handle->rinfo.roce_io_base;
4733 hr_dev->caps.num_ports = 1;
4734 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
4735 hr_dev->iboe.phy_port[0] = 0;
4736
4737 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
4738 hr_dev->iboe.netdevs[0]->dev_addr);
4739
4740 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
4741 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
4742 i + handle->rinfo.base_vector);
4743
4744 /* cmd issue mode: 0 is poll, 1 is event */
4745 hr_dev->cmd_mod = 1;
4746 hr_dev->loop_idc = 0;
4747
4748 return 0;
4749 }
4750
4751 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
4752 {
4753 struct hns_roce_dev *hr_dev;
4754 int ret;
4755
4756 hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
4757 if (!hr_dev)
4758 return -ENOMEM;
4759
4760 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
4761 if (!hr_dev->priv) {
4762 ret = -ENOMEM;
4763 goto error_failed_kzalloc;
4764 }
4765
4766 hr_dev->pci_dev = handle->pdev;
4767 hr_dev->dev = &handle->pdev->dev;
4768 handle->priv = hr_dev;
4769
4770 ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
4771 if (ret) {
4772 dev_err(hr_dev->dev, "Get Configuration failed!\n");
4773 goto error_failed_get_cfg;
4774 }
4775
4776 ret = hns_roce_init(hr_dev);
4777 if (ret) {
4778 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
4779 goto error_failed_get_cfg;
4780 }
4781
4782 return 0;
4783
4784 error_failed_get_cfg:
4785 kfree(hr_dev->priv);
4786
4787 error_failed_kzalloc:
4788 ib_dealloc_device(&hr_dev->ib_dev);
4789
4790 return ret;
4791 }
4792
4793 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
4794 bool reset)
4795 {
4796 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
4797
4798 hns_roce_exit(hr_dev);
4799 kfree(hr_dev->priv);
4800 ib_dealloc_device(&hr_dev->ib_dev);
4801 }
4802
4803 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
4804 .init_instance = hns_roce_hw_v2_init_instance,
4805 .uninit_instance = hns_roce_hw_v2_uninit_instance,
4806 };
4807
4808 static struct hnae3_client hns_roce_hw_v2_client = {
4809 .name = "hns_roce_hw_v2",
4810 .type = HNAE3_CLIENT_ROCE,
4811 .ops = &hns_roce_hw_v2_ops,
4812 };
4813
4814 static int __init hns_roce_hw_v2_init(void)
4815 {
4816 return hnae3_register_client(&hns_roce_hw_v2_client);
4817 }
4818
4819 static void __exit hns_roce_hw_v2_exit(void)
4820 {
4821 hnae3_unregister_client(&hns_roce_hw_v2_client);
4822 }
4823
4824 module_init(hns_roce_hw_v2_init);
4825 module_exit(hns_roce_hw_v2_exit);
4826
4827 MODULE_LICENSE("Dual BSD/GPL");
4828 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
4829 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
4830 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
4831 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");