/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "user.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
        struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct ib_cq *ibcq = &cq->ibcq;
        struct ib_event event;

        if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
                mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
                             type, mcq->cqn);
                return;
        }

        if (ibcq->event_handler) {
                event.device = &dev->ib_dev;
                event.event = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
        return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
        return (n & nent) ? 1 : 0;
}

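/*
 * Return the CQE at index n if it is valid and currently owned by
 * software, i.e. its ownership bit matches the expected value for
 * this pass over the power-of-two sized CQ ring; otherwise NULL.
 */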
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
        void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
        struct mlx5_cqe64 *cqe64;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

        if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
            !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
                return cqe;
        } else {
                return NULL;
        }
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}

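/*
 * UMR WQEs carry a driver-chosen operation; recover the completion
 * opcode from the wr_data recorded when the WQE was posted.
 */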
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
        switch (wq->wr_data[idx]) {
        case MLX5_IB_WR_UMR:
                return 0;

        case IB_WR_LOCAL_INV:
                return IB_WC_LOCAL_INV;

        case IB_WR_FAST_REG_MR:
                return IB_WC_FAST_REG_MR;

        default:
                pr_warn("unknown completion status\n");
                return 0;
        }
}

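/*
 * Fill in a work completion for a successful requester CQE.  The
 * hardware opcode is in the top byte of sop_drop_qpn; the switch
 * below relies on deliberate fall-throughs for the immediate-data
 * variants.
 */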
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                            struct mlx5_ib_wq *wq, int idx)
{
        wc->wc_flags = 0;
        switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
        case MLX5_OPCODE_RDMA_WRITE_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
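                /* fall through */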
        case MLX5_OPCODE_RDMA_WRITE:
                wc->opcode = IB_WC_RDMA_WRITE;
                break;
        case MLX5_OPCODE_SEND_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
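                /* fall through */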
        case MLX5_OPCODE_SEND:
        case MLX5_OPCODE_SEND_INVAL:
                wc->opcode = IB_WC_SEND;
                break;
        case MLX5_OPCODE_RDMA_READ:
                wc->opcode = IB_WC_RDMA_READ;
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);
                break;
        case MLX5_OPCODE_ATOMIC_CS:
                wc->opcode = IB_WC_COMP_SWAP;
                wc->byte_len = 8;
                break;
        case MLX5_OPCODE_ATOMIC_FA:
                wc->opcode = IB_WC_FETCH_ADD;
                wc->byte_len = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_CS:
                wc->opcode = IB_WC_MASKED_COMP_SWAP;
                wc->byte_len = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_FA:
                wc->opcode = IB_WC_MASKED_FETCH_ADD;
                wc->byte_len = 8;
                break;
        case MLX5_OPCODE_BIND_MW:
                wc->opcode = IB_WC_BIND_MW;
                break;
        case MLX5_OPCODE_UMR:
                wc->opcode = get_umr_comp(wq, idx);
                break;
        }
}

enum {
        MLX5_GRH_IN_BUFFER = 1,
        MLX5_GRH_IN_CQE    = 2,
};

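/*
 * Fill in a work completion for a responder (receive) CQE and consume
 * the corresponding receive-queue or SRQ work request.
 */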
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                             struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
        struct mlx5_ib_srq *srq;
        struct mlx5_ib_wq *wq;
        u16 wqe_ctr;
        u8 g;

        if (qp->ibqp.srq || qp->ibqp.xrcd) {
                struct mlx5_core_srq *msrq = NULL;

                if (qp->ibqp.xrcd) {
                        msrq = mlx5_core_get_srq(dev->mdev,
                                                 be32_to_cpu(cqe->srqn));
                        srq = to_mibsrq(msrq);
                } else {
                        srq = to_msrq(qp->ibqp.srq);
                }
                if (srq) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_counter);
                        wc->wr_id = srq->wrid[wqe_ctr];
                        mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        if (msrq && atomic_dec_and_test(&msrq->refcount))
                                complete(&msrq->free);
                }
        } else {
                wq = &qp->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        }
        wc->byte_len = be32_to_cpu(cqe->byte_cnt);

        switch (cqe->op_own >> 4) {
        case MLX5_CQE_RESP_WR_IMM:
                wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                wc->wc_flags = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND:
                wc->opcode = IB_WC_RECV;
                wc->wc_flags = 0;
                break;
        case MLX5_CQE_RESP_SEND_IMM:
                wc->opcode = IB_WC_RECV;
                wc->wc_flags = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND_INV:
                wc->opcode = IB_WC_RECV;
                wc->wc_flags = IB_WC_WITH_INVALIDATE;
                wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
                break;
        }
        wc->slid = be16_to_cpu(cqe->slid);
        wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
        wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
        wc->dlid_path_bits = cqe->ml_path;
        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
        wc->wc_flags |= g ? IB_WC_GRH : 0;
        wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
        __be32 *p = (__be32 *)cqe;
        int i;

        mlx5_ib_warn(dev, "dump error cqe\n");
        for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
                pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
                        be32_to_cpu(p[1]), be32_to_cpu(p[2]),
                        be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
                                  struct mlx5_err_cqe *cqe,
                                  struct ib_wc *wc)
{
        int dump = 1;

        switch (cqe->syndrome) {
        case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
                dump = 0;
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX5_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_synd;
        if (dump)
                dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
        /* TBD: waiting decision
         */
        return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
        struct mlx5_wqe_data_seg *dpseg;
        void *addr;

        dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
                sizeof(struct mlx5_wqe_raddr_seg) +
                sizeof(struct mlx5_wqe_atomic_seg);
        addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
        return addr;
}

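/*
 * Atomic response data is written into the send WQE's data segment in
 * big-endian form; convert it to host byte order in place, as a 32-bit
 * value for a 4-byte response and in 64-bit chunks otherwise.
 */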
static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                          uint16_t idx)
{
        void *addr;
        int byte_count;
        int i;

        if (!is_atomic_response(qp, idx))
                return;

        byte_count = be32_to_cpu(cqe64->byte_cnt);
        addr = mlx5_get_atomic_laddr(qp, idx);

        if (byte_count == 4) {
                *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
        } else {
                for (i = 0; i < byte_count; i += 8) {
                        *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
                        addr += 8;
                }
        }
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                           u16 tail, u16 head)
{
        u16 idx;

        do {
                idx = tail & (qp->sq.wqe_cnt - 1);
                handle_atomic(qp, cqe64, idx);
                if (idx == head)
                        break;

                tail = qp->sq.w_list[idx].next;
        } while (1);
        tail = qp->sq.w_list[idx].next;
        qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
        mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
                             struct ib_sig_err *item)
{
        u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

        if (syndrome & GUARD_ERR) {
                item->err_type = IB_SIG_BAD_GUARD;
                item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
                item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
        } else if (syndrome & REFTAG_ERR) {
                item->err_type = IB_SIG_BAD_REFTAG;
                item->expected = be32_to_cpu(cqe->expected_reftag);
                item->actual = be32_to_cpu(cqe->actual_reftag);
        } else if (syndrome & APPTAG_ERR) {
                item->err_type = IB_SIG_BAD_APPTAG;
                item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
                item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
        } else {
                pr_err("Got signature completion error with bad syndrome %04x\n",
                       syndrome);
        }

        item->sig_err_offset = be64_to_cpu(cqe->err_offset);
        item->key = be32_to_cpu(cqe->mkey);
}

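/*
 * Poll one CQE off the CQ.  Returns 0 when a completion was filled in,
 * -EAGAIN when the CQ is empty, or a negative error.  *cur_qp caches
 * the QP of the previous CQE so that consecutive completions on the
 * same QP skip the QP table lookup.
 */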
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                         struct mlx5_ib_qp **cur_qp,
                         struct ib_wc *wc)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_err_cqe *err_cqe;
        struct mlx5_cqe64 *cqe64;
        struct mlx5_core_qp *mqp;
        struct mlx5_ib_wq *wq;
        struct mlx5_sig_err_cqe *sig_err_cqe;
        struct mlx5_core_mr *mmr;
        struct mlx5_ib_mr *mr;
        uint8_t opcode;
        uint32_t qpn;
        u16 wqe_ctr;
        void *cqe;
        int idx;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

        ++cq->mcq.cons_index;

        /* Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        opcode = cqe64->op_own >> 4;
        if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
                if (likely(cq->resize_buf)) {
                        free_cq_buf(dev, &cq->buf);
                        cq->buf = *cq->resize_buf;
                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                        goto repoll;
                } else {
                        mlx5_ib_warn(dev, "unexpected resize cqe\n");
                }
        }

        qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
        if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
                /* We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx5_qp_lookup(dev->mdev, qpn);
                if (unlikely(!mqp)) {
                        mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
                                     cq->mcq.cqn, qpn);
                        return -EINVAL;
                }

                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;
        switch (opcode) {
        case MLX5_CQE_REQ:
                wq = &(*cur_qp)->sq;
                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                idx = wqe_ctr & (wq->wqe_cnt - 1);
                handle_good_req(wc, cqe64, wq, idx);
                handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
                wc->wr_id = wq->wrid[idx];
                wq->tail = wq->wqe_head[idx] + 1;
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESP_WR_IMM:
        case MLX5_CQE_RESP_SEND:
        case MLX5_CQE_RESP_SEND_IMM:
        case MLX5_CQE_RESP_SEND_INV:
                handle_responder(wc, cqe64, *cur_qp);
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESIZE_CQ:
                break;
        case MLX5_CQE_REQ_ERR:
        case MLX5_CQE_RESP_ERR:
                err_cqe = (struct mlx5_err_cqe *)cqe64;
                mlx5_handle_error_cqe(dev, err_cqe, wc);
                mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
                            opcode == MLX5_CQE_REQ_ERR ?
                            "Requestor" : "Responder", cq->mcq.cqn);
                mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
                            err_cqe->syndrome, err_cqe->vendor_err_synd);
                if (opcode == MLX5_CQE_REQ_ERR) {
                        wq = &(*cur_qp)->sq;
                        wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                        idx = wqe_ctr & (wq->wqe_cnt - 1);
                        wc->wr_id = wq->wrid[idx];
                        wq->tail = wq->wqe_head[idx] + 1;
                } else {
                        struct mlx5_ib_srq *srq;

                        if ((*cur_qp)->ibqp.srq) {
                                srq = to_msrq((*cur_qp)->ibqp.srq);
                                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                                wc->wr_id = srq->wrid[wqe_ctr];
                                mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        } else {
                                wq = &(*cur_qp)->rq;
                                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                                ++wq->tail;
                        }
                }
                break;
        case MLX5_CQE_SIG_ERR:
                sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

                read_lock(&dev->mdev->priv.mr_table.lock);
                mmr = __mlx5_mr_lookup(dev->mdev,
                                       mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
                if (unlikely(!mmr)) {
                        read_unlock(&dev->mdev->priv.mr_table.lock);
                        mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
                                     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
                        return -EINVAL;
                }

                mr = to_mibmr(mmr);
                get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
                mr->sig->sig_err_exists = true;
                mr->sig->sigerr_count++;

                mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
                             cq->mcq.cqn, mr->sig->err_item.key,
                             mr->sig->err_item.err_type,
                             mr->sig->err_item.sig_err_offset,
                             mr->sig->err_item.expected,
                             mr->sig->err_item.actual);

                read_unlock(&dev->mdev->priv.mr_table.lock);
                goto repoll;
        }

        return 0;
}

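/*
 * Drain up to num_entries completions under the CQ lock, then update
 * the consumer index doorbell record if anything was polled.
 */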
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        struct mlx5_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        int err = 0;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; npolled++) {
                err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
                if (err)
                        break;
        }

        if (npolled)
                mlx5_cq_set_ci(&cq->mcq);

        spin_unlock_irqrestore(&cq->lock, flags);

        if (err == 0 || err == -EAGAIN)
                return npolled;
        else
                return err;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));

        return 0;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
                        int nent, int cqe_size)
{
        int err;

        err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
                             PAGE_SIZE * 2, &buf->buf);
        if (err)
                return err;

        buf->cqe_size = cqe_size;
        buf->nent = nent;

        return 0;
}

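/*
 * Create a CQ whose buffer and doorbell record live in user memory.
 * The create command from older userspace may lack the trailing
 * reserved field, so ucmdlen is clamped accordingly before copying.
 */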
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                          struct ib_ucontext *context, struct mlx5_ib_cq *cq,
                          int entries, struct mlx5_create_cq_mbox_in **cqb,
                          int *cqe_size, int *index, int *inlen)
{
        struct mlx5_ib_create_cq ucmd;
        size_t ucmdlen;
        int page_shift;
        int npages;
        int ncont;
        int err;

        ucmdlen =
                (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
                 sizeof(ucmd)) ? (sizeof(ucmd) -
                                  sizeof(ucmd.reserved)) : sizeof(ucmd);

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
                return -EFAULT;

        if (ucmdlen == sizeof(ucmd) &&
            ucmd.reserved != 0)
                return -EINVAL;

        if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
                return -EINVAL;

        *cqe_size = ucmd.cqe_size;

        cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
                                   entries * ucmd.cqe_size,
                                   IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(cq->buf.umem)) {
                err = PTR_ERR(cq->buf.umem);
                return err;
        }

        err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                  &cq->db);
        if (err)
                goto err_umem;

        mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
                           &ncont, NULL);
        mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
                    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

        *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
        *cqb = mlx5_vzalloc(*inlen);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_db;
        }
        mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
        (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;

        *index = to_mucontext(context)->uuari.uars[0].index;

        return 0;

err_db:
        mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
        ib_umem_release(cq->buf.umem);
        return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
        mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
        ib_umem_release(cq->buf.umem);
}

static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
        int i;
        void *cqe;
        struct mlx5_cqe64 *cqe64;

        for (i = 0; i < buf->nent; i++) {
                cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
                cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
                cqe64->op_own = MLX5_CQE_INVALID << 4;
        }
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                            int entries, int cqe_size,
                            struct mlx5_create_cq_mbox_in **cqb,
                            int *index, int *inlen)
{
        int err;

        err = mlx5_db_alloc(dev->mdev, &cq->db);
        if (err)
                return err;

        cq->mcq.set_ci_db = cq->db.db;
        cq->mcq.arm_db = cq->db.db + 1;
        cq->mcq.cqe_sz = cqe_size;

        err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
        if (err)
                goto err_db;

        init_cq_buf(cq, &cq->buf);

        *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
        *cqb = mlx5_vzalloc(*inlen);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_buf;
        }
        mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

        (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        *index = dev->mdev->priv.uuari.uars[0].index;

        return 0;

err_buf:
        free_cq_buf(dev, &cq->buf);

err_db:
        mlx5_db_free(dev->mdev, &cq->db);
        return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
        free_cq_buf(dev, &cq->buf);
        mlx5_db_free(dev->mdev, &cq->db);
}

struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                                int vector, struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        struct mlx5_create_cq_mbox_in *cqb = NULL;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_cq *cq;
        int uninitialized_var(index);
        int uninitialized_var(inlen);
        int cqe_size;
        int irqn;
        int eqn;
        int err;

        if (entries < 0)
                return ERR_PTR(-EINVAL);

        entries = roundup_pow_of_two(entries + 1);
        if (entries > dev->mdev->caps.gen.max_cqes)
                return ERR_PTR(-EINVAL);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;

        if (context) {
                err = create_cq_user(dev, udata, context, cq, entries,
                                     &cqb, &cqe_size, &index, &inlen);
                if (err)
                        goto err_create;
        } else {
                /* for now choose 64 bytes till we have a proper interface */
                cqe_size = 64;
                err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
                                       &index, &inlen);
                if (err)
                        goto err_create;
        }

        cq->cqe_size = cqe_size;
        cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
        cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
        err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
        if (err)
                goto err_cqb;

        cqb->ctx.c_eqn = cpu_to_be16(eqn);
        cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

        err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
        if (err)
                goto err_cqb;

        mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
        cq->mcq.irqn = irqn;
        cq->mcq.comp = mlx5_ib_cq_comp;
        cq->mcq.event = mlx5_ib_cq_event;

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
                        err = -EFAULT;
                        goto err_cmd;
                }

        kvfree(cqb);
        return &cq->ibcq;

err_cmd:
        mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
        kvfree(cqb);
        if (context)
                destroy_cq_user(cq, context);
        else
                destroy_cq_kernel(dev, cq);

err_create:
        kfree(cq);

        return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->device);
        struct mlx5_ib_cq *mcq = to_mcq(cq);
        struct ib_ucontext *context = NULL;

        if (cq->uobject)
                context = cq->uobject->context;

        mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
        if (context)
                destroy_cq_user(mcq, context);
        else
                destroy_cq_kernel(dev, mcq);

        kfree(mcq);

        return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
        return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

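/*
 * Remove all CQEs that match the given rsn (QP or SRQ number) from the
 * CQ.  The caller must already hold the CQ lock; mlx5_ib_cq_clean() is
 * the locking wrapper.
 */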
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
        struct mlx5_cqe64 *cqe64, *dest64;
        void *cqe, *dest;
        u32 prod_index;
        int nfreed = 0;
        u8 owner_bit;

        if (!cq)
                return;

        /* First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /* Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
                if (is_equal_rsn(cqe64, rsn)) {
                        if (srq && (ntohl(cqe64->srqn) & 0xffffff))
                                mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
                        owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
                        memcpy(dest, cqe, cq->mcq.cqe_sz);
                        dest64->op_own = owner_bit |
                                (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /* Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx5_cq_set_ci(&cq->mcq);
        }
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
        if (!cq)
                return;

        spin_lock_irq(&cq->lock);
        __mlx5_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx5_modify_cq_mbox_in *in;
        struct mlx5_ib_dev *dev = to_mdev(cq->device);
        struct mlx5_ib_cq *mcq = to_mcq(cq);
        int err;
        u32 fsel;

        if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
                return -ENOSYS;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        in->cqn = cpu_to_be32(mcq->mcq.cqn);
        fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
        in->ctx.cq_period = cpu_to_be16(cq_period);
        in->ctx.cq_max_count = cpu_to_be16(cq_count);
        in->field_select = cpu_to_be32(fsel);
        err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
        kfree(in);

        if (err)
                mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

        return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                       int entries, struct ib_udata *udata, int *npas,
                       int *page_shift, int *cqe_size)
{
        struct mlx5_ib_resize_cq ucmd;
        struct ib_umem *umem;
        int err;
        int npages;
        struct ib_ucontext *context = cq->buf.umem->context;

        err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
        if (err)
                return err;

        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;

        umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
                return err;
        }

        mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
                           npas, NULL);

        cq->resize_umem = umem;
        *cqe_size = ucmd.cqe_size;

        return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
        ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                         int entries, int cqe_size)
{
        int err;

        cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
        if (err)
                goto ex;

        init_cq_buf(cq, cq->resize_buf);

        return 0;

ex:
        kfree(cq->resize_buf);
        return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
        free_cq_buf(dev, cq->resize_buf);
        cq->resize_buf = NULL;
}

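/*
 * Copy the software-owned CQEs from the old buffer into the resize
 * buffer, fixing up each destination ownership bit, until the special
 * resize CQE is reached.
 */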
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_cqe64 *scqe64;
        struct mlx5_cqe64 *dcqe64;
        void *start_cqe;
        void *scqe;
        void *dcqe;
        int ssize;
        int dsize;
        int i;
        u8 sw_own;

        ssize = cq->buf.cqe_size;
        dsize = cq->resize_buf->cqe_size;
        if (ssize != dsize) {
                mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
                return -EINVAL;
        }

        i = cq->mcq.cons_index;
        scqe = get_sw_cqe(cq, i);
        scqe64 = ssize == 64 ? scqe : scqe + 64;
        start_cqe = scqe;
        if (!scqe) {
                mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
                return -EINVAL;
        }

        while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
                dcqe = get_cqe_from_buf(cq->resize_buf,
                                        (i + 1) & (cq->resize_buf->nent),
                                        dsize);
                dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
                sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
                memcpy(dcqe, scqe, dsize);
                dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

                ++i;
                scqe = get_sw_cqe(cq, i);
                scqe64 = ssize == 64 ? scqe : scqe + 64;
                if (!scqe) {
                        mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
                        return -EINVAL;
                }

                if (scqe == start_cqe) {
                        pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
                                cq->mcq.cqn);
                        return -ENOMEM;
                }
        }
        ++cq->mcq.cons_index;
        return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        struct mlx5_modify_cq_mbox_in *in;
        int err;
        int npas;
        int page_shift;
        int inlen;
        int uninitialized_var(cqe_size);
        unsigned long flags;

        if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
                pr_info("Firmware does not support resize CQ\n");
                return -ENOSYS;
        }

        if (entries < 1)
                return -EINVAL;

        entries = roundup_pow_of_two(entries + 1);
        if (entries > dev->mdev->caps.gen.max_cqes + 1)
                return -EINVAL;

        if (entries == ibcq->cqe + 1)
                return 0;

        mutex_lock(&cq->resize_mutex);
        if (udata) {
                err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
                                  &cqe_size);
        } else {
                cqe_size = 64;
                err = resize_kernel(dev, cq, entries, cqe_size);
                if (!err) {
                        npas = cq->resize_buf->buf.npages;
                        page_shift = cq->resize_buf->buf.page_shift;
                }
        }

        if (err)
                goto ex;

        inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto ex_resize;
        }

        if (udata)
                mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
                                     in->pas, 0);
        else
                mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);

        in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
                                       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
                                       MLX5_MODIFY_CQ_MASK_PG_SIZE);
        in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
        in->ctx.page_offset = 0;
        in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
        in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
        in->cqn = cpu_to_be32(cq->mcq.cqn);

        err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
        if (err)
                goto ex_alloc;

        if (udata) {
                cq->ibcq.cqe = entries - 1;
                ib_umem_release(cq->buf.umem);
                cq->buf.umem = cq->resize_umem;
                cq->resize_umem = NULL;
        } else {
                struct mlx5_ib_cq_buf tbuf;
                int resized = 0;

                spin_lock_irqsave(&cq->lock, flags);
                if (cq->resize_buf) {
                        err = copy_resize_cqes(cq);
                        if (!err) {
                                tbuf = cq->buf;
                                cq->buf = *cq->resize_buf;
                                kfree(cq->resize_buf);
                                cq->resize_buf = NULL;
                                resized = 1;
                        }
                }
                cq->ibcq.cqe = entries - 1;
                spin_unlock_irqrestore(&cq->lock, flags);
                if (resized)
                        free_cq_buf(dev, &tbuf);
        }
        mutex_unlock(&cq->resize_mutex);

        kvfree(in);
        return 0;

ex_alloc:
        kvfree(in);

ex_resize:
        if (udata)
                un_resize_user(cq);
        else
                un_resize_kernel(dev, cq);
ex:
        mutex_unlock(&cq->resize_mutex);
        return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
        struct mlx5_ib_cq *cq;

        if (!ibcq)
                return 128;

        cq = to_mcq(ibcq);
        return cq->cqe_size;
}