mirror_ubuntu-bionic-kernel.git: drivers/net/ethernet/mellanox/mlx5/core/qp.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

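/* Look up the resource with the given RSN in the QP table and take a
 * reference on it.  The caller must release the reference with
 * mlx5_core_put_rsc().  Returns NULL if no such resource is registered.
 */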
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

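/* Drop a reference on a resource; complete ->free once the last reference
 * is gone so that waiters (e.g. mlx5_core_destroy_qp()) can proceed.
 */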
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

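/* Dispatch an asynchronous event for the resource identified by @rsn.
 * Only QP resources are handled here; other resource types just trigger a
 * warning.
 */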
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        switch (common->res) {
        case MLX5_RES_QP:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

        mlx5_core_put_rsc(common);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
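/* Handle a page fault EQE generated by the HCA (on-demand paging).
 *
 * Extract the faulting QP number, look up the QP, decode the RDMA- or
 * WQE-specific fields of the EQE into a struct mlx5_pagefault, and pass it
 * to the QP's pfault_handler.  If the QP has no handler, the fault is left
 * unresolved and the QP will hang until it is destroyed.
 */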
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
        struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
        int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
        struct mlx5_core_qp *qp =
                container_of(common, struct mlx5_core_qp, common);
        struct mlx5_pagefault pfault;

        if (!qp) {
                mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
                               qpn);
                return;
        }

        pfault.event_subtype = eqe->sub_type;
        pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
                (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
        pfault.bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

        mlx5_core_dbg(dev,
                      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
                      eqe->sub_type, pfault.flags);

        switch (eqe->sub_type) {
        case MLX5_PFAULT_SUBTYPE_RDMA:
                /* RDMA based event */
                pfault.rdma.r_key =
                        be32_to_cpu(pf_eqe->rdma.r_key);
                pfault.rdma.packet_size =
                        be16_to_cpu(pf_eqe->rdma.packet_length);
                pfault.rdma.rdma_op_len =
                        be32_to_cpu(pf_eqe->rdma.rdma_op_len);
                pfault.rdma.rdma_va =
                        be64_to_cpu(pf_eqe->rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
                              qpn, pfault.rdma.r_key);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
                              pfault.rdma.rdma_op_len);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_va: 0x%016llx,\n",
                              pfault.rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        case MLX5_PFAULT_SUBTYPE_WQE:
                /* WQE based event */
                pfault.wqe.wqe_index =
                        be16_to_cpu(pf_eqe->wqe.wqe_index);
                pfault.wqe.packet_size =
                        be16_to_cpu(pf_eqe->wqe.packet_length);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
                              qpn, pfault.wqe.wqe_index);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        default:
                mlx5_core_warn(dev,
                               "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
                               eqe->sub_type, qpn);
                /* Unsupported page faults should still be resolved by the
                 * page fault handler
                 */
        }

        if (qp->pfault_handler) {
                qp->pfault_handler(qp, &pfault);
        } else {
                mlx5_core_err(dev,
                              "ODP event for QP %08x, without a fault handler in QP\n",
                              qpn);
                /* Page fault will remain unresolved. QP will hang until it is
                 * destroyed
                 */
        }

        mlx5_core_put_rsc(common);
}
#endif

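/* Create a QP with the CREATE_QP firmware command, insert it into the
 * driver's radix tree of QPs and initialize its reference count.  If
 * anything fails after the firmware command succeeded, the QP is destroyed
 * again before returning the error.
 */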
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_create_qp_mbox_out out;
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;
        void *qpc;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

        if (dev->issi) {
                qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
                /* 0xffffff means we ask to work with cqe version 0 */
                MLX5_SET(qpc, qpc, user_index, 0xffffff);
        }

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }

        if (out.hdr.status) {
                mlx5_core_warn(dev, "current num of QPs 0x%x\n",
                               atomic_read(&dev->num_qps));
                return mlx5_cmd_status_to_err(&out.hdr);
        }

        qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        qp->common.res = MLX5_RES_QP;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, qp->qpn, qp);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto err_cmd;
        }

        err = mlx5_debug_qp_add(dev, qp);
        if (err)
                mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
                              qp->qpn);

        qp->pid = current->pid;
        atomic_set(&qp->common.refcount, 1);
        atomic_inc(&dev->num_qps);
        init_completion(&qp->common.free);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        din.qpn = cpu_to_be32(qp->qpn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

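/* Remove the QP from the radix tree, wait until all outstanding references
 * (e.g. from event handlers) have been dropped, and then destroy it with
 * the DESTROY_QP firmware command.
 */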
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        struct mlx5_destroy_qp_mbox_in in;
        struct mlx5_destroy_qp_mbox_out out;
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;
        int err;

        mlx5_debug_qp_remove(dev, qp);

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree, qp->qpn);
        spin_unlock_irqrestore(&table->lock, flags);

        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

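/* Move a QP between states with the appropriate MODIFY_QP-family command.
 * optab maps (current state, new state) to the firmware opcode; transitions
 * that are not listed are invalid and rejected with -EINVAL.
 */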
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
                        enum mlx5_qp_state new_state,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
                [MLX5_QP_STATE_RST] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_RST2INIT_QP,
                },
                [MLX5_QP_STATE_INIT]  = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_INIT2INIT_QP,
                        [MLX5_QP_STATE_RTR]     = MLX5_CMD_OP_INIT2RTR_QP,
                },
                [MLX5_QP_STATE_RTR]   = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTR2RTS_QP,
                },
                [MLX5_QP_STATE_RTS]   = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTS2RTS_QP,
                },
                [MLX5_QP_STATE_SQD] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                },
                [MLX5_QP_STATE_SQER] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_SQERR2RTS_QP,
                },
                [MLX5_QP_STATE_ERR] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                }
        };

        struct mlx5_modify_qp_mbox_out out;
        int err = 0;
        u16 op;

        if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
            !optab[cur_state][new_state])
                return -EINVAL;

        memset(&out, 0, sizeof(out));
        op = optab[cur_state][new_state];
        in->hdr.opcode = cpu_to_be16(op);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

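/* Set up and tear down the per-device QP table and its debugfs entries. */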
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}

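/* Query the current context of a QP with the QUERY_QP firmware command;
 * the result is written into @out, which is @outlen bytes long.
 */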
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen)
{
        struct mlx5_query_qp_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

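/* Allocate and free XRC domains (XRCDs) via the ALLOC_XRCD and DEALLOC_XRCD
 * firmware commands.
 */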
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        struct mlx5_alloc_xrcd_mbox_in in;
        struct mlx5_alloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
                *xrcdn = be32_to_cpu(out.xrcdn);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        struct mlx5_dealloc_xrcd_mbox_in in;
        struct mlx5_dealloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
        in.xrcdn = cpu_to_be32(xrcdn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
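/* Resume a QP after a page fault event has been handled.  @flags selects
 * which fault types (requestor/write/RDMA) to resume; @error additionally
 * sets the MLX5_PAGE_FAULT_RESUME_ERROR flag in the command.
 */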
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
                                u8 flags, int error)
{
        struct mlx5_page_fault_resume_mbox_in in;
        struct mlx5_page_fault_resume_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
        in.hdr.opmod = 0;
        flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
                  MLX5_PAGE_FAULT_RESUME_WRITE     |
                  MLX5_PAGE_FAULT_RESUME_RDMA);
        flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
        in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
                                   (flags << MLX5_QPN_BITS));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif