drivers/net/ethernet/mellanox/mlx5/core/qp.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"

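/* Look up the resource (QP, RQ or SQ) registered under @rsn and take a
 * reference on it.  Returns NULL if no such resource is tracked; on success
 * the caller must release the reference with mlx5_core_put_rsc().
 */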
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

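/* Masks of the asynchronous event types that may legitimately be reported
 * for each resource type; anything else is rejected by
 * is_event_type_allowed().
 */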
static u64 qp_allowed_event_types(void)
{
        u64 mask;

        mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
               BIT(MLX5_EVENT_TYPE_COMM_EST) |
               BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
               BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
               BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
               BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
               BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

        return mask;
}

static u64 rq_allowed_event_types(void)
{
        u64 mask;

        mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

        return mask;
}

static u64 sq_allowed_event_types(void)
{
        return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
        switch (rsc_type) {
        case MLX5_EVENT_QUEUE_TYPE_QP:
                return BIT(event_type) & qp_allowed_event_types();
        case MLX5_EVENT_QUEUE_TYPE_RQ:
                return BIT(event_type) & rq_allowed_event_types();
        case MLX5_EVENT_QUEUE_TYPE_SQ:
                return BIT(event_type) & sq_allowed_event_types();
        default:
                WARN(1, "Event arrived for unknown resource type");
                return false;
        }
}

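/* Dispatch an asynchronous event to the QP, RQ or SQ it targets.  The
 * resource type is encoded in the bits of @rsn above MLX5_USER_INDEX_LEN.
 */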
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
                mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
                               event_type, rsn);
                /* Drop the reference taken by mlx5_get_rsc() before bailing out. */
                goto out;
        }

        switch (common->res) {
        case MLX5_RES_QP:
        case MLX5_RES_RQ:
        case MLX5_RES_SQ:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

out:
        mlx5_core_put_rsc(common);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
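/* Decode an ODP page-fault EQE into a struct mlx5_pagefault and hand it to
 * the pfault_handler registered on the faulting QP.
 */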
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
        struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
        int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
        struct mlx5_core_qp *qp =
                container_of(common, struct mlx5_core_qp, common);
        struct mlx5_pagefault pfault;

        if (!qp) {
                mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
                               qpn);
                return;
        }

        pfault.event_subtype = eqe->sub_type;
        pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
                (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
        pfault.bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

        mlx5_core_dbg(dev,
                      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x\n",
                      eqe->sub_type, pfault.flags);

        switch (eqe->sub_type) {
        case MLX5_PFAULT_SUBTYPE_RDMA:
                /* RDMA based event */
                pfault.rdma.r_key =
                        be32_to_cpu(pf_eqe->rdma.r_key);
                pfault.rdma.packet_size =
                        be16_to_cpu(pf_eqe->rdma.packet_length);
                pfault.rdma.rdma_op_len =
                        be32_to_cpu(pf_eqe->rdma.rdma_op_len);
                pfault.rdma.rdma_va =
                        be64_to_cpu(pf_eqe->rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x\n",
                              qpn, pfault.rdma.r_key);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_op_len: 0x%08x\n",
                              pfault.rdma.rdma_op_len);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_va: 0x%016llx\n",
                              pfault.rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        case MLX5_PFAULT_SUBTYPE_WQE:
                /* WQE based event */
                pfault.wqe.wqe_index =
                        be16_to_cpu(pf_eqe->wqe.wqe_index);
                pfault.wqe.packet_size =
                        be16_to_cpu(pf_eqe->wqe.packet_length);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x\n",
                              qpn, pfault.wqe.wqe_index);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        default:
                mlx5_core_warn(dev,
                               "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
                               eqe->sub_type, qpn);
                /* Unsupported page faults should still be resolved by the
                 * page fault handler
                 */
        }

        if (qp->pfault_handler) {
                qp->pfault_handler(qp, &pfault);
        } else {
                mlx5_core_err(dev,
                              "ODP event for QP %08x, without a fault handler in QP\n",
                              qpn);
                /* Page fault will remain unresolved. QP will hang until it is
                 * destroyed
                 */
        }

        mlx5_core_put_rsc(common);
}
#endif

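/* Common tracking for QPs, RQs and SQs: register the resource in the radix
 * tree under qpn | (rsc_type << MLX5_USER_INDEX_LEN) so that asynchronous
 * events and page faults can be routed back to it.
 */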
static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp,
                                int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree,
                                qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
                                qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = current->pid;

        return 0;
}

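/* Unregister the resource and wait until every outstanding reference taken
 * through mlx5_get_rsc() has been dropped.
 */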
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree,
                          qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
        spin_unlock_irqrestore(&table->lock, flags);
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}

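/* Issue CREATE_QP and register the new QP for event dispatch and debugfs.
 * If registration fails, the QP is destroyed again before returning.
 */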
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        u32 *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
        u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
        u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
        int err;

        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        err = err ? : mlx5_cmd_status_to_err_v2(out);
        if (err)
                return err;

        qp->qpn = MLX5_GET(create_qp_out, out, qpn);
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        err = mlx5_debug_qp_add(dev, qp);
        if (err)
                mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
                              qp->qpn);

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
        /* Build the DESTROY_QP command in din, not in the caller's in buffer. */
        MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        mlx5_cmd_status_to_err_v2(dout);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

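/* Unregister the QP, wait for its users to finish, then issue DESTROY_QP. */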
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
        int err;

        mlx5_debug_qp_remove(dev, qp);

        destroy_qprqsq_common(dev, qp);

        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        err = err ? : mlx5_cmd_status_to_err_v2(out);
        if (err)
                return err;

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        struct mlx5_modify_qp_mbox_out out;
        int err = 0;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(operation);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
        int err;

        MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
        MLX5_SET(query_qp_in, in, qpn, qp->qpn);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
        return err ? : mlx5_cmd_status_to_err_v2(out);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
        int err;

        MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        err = err ? : mlx5_cmd_status_to_err_v2(out);
        if (!err)
                *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
        int err;

        MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
        MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        return err ? : mlx5_cmd_status_to_err_v2(out);
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
                                u8 flags, int error)
{
        u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
        int err;

        MLX5_SET(page_fault_resume_in, in, opcode,
                 MLX5_CMD_OP_PAGE_FAULT_RESUME);

        MLX5_SET(page_fault_resume_in, in, qpn, qpn);

        if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
                MLX5_SET(page_fault_resume_in, in, req_res, 1);
        if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
                MLX5_SET(page_fault_resume_in, in, read_write, 1);
        if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
                MLX5_SET(page_fault_resume_in, in, rdma, 1);
        if (error)
                MLX5_SET(page_fault_resume_in, in, error, 1);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        return err ? : mlx5_cmd_status_to_err_v2(out);
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif

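/* "Tracked" RQ/SQ helpers: create the queue through the transport object
 * interface and additionally register it in the QP table so it receives
 * asynchronous events like an ordinary QP.
 */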
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;
        u32 rqn;

        err = mlx5_core_create_rq(dev, in, inlen, &rqn);
        if (err)
                return err;

        rq->qpn = rqn;
        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5_core_destroy_rq(dev, rq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;
        u32 sqn;

        err = mlx5_core_create_sq(dev, in, inlen, &sqn);
        if (err)
                return err;

        sq->qpn = sqn;
        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                goto err_destroy_sq;

        return 0;

err_destroy_sq:
        mlx5_core_destroy_sq(dev, sq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);

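/* Q counters: allocate, free and query per-device queue counter sets, used
 * among other things for the out-of-buffer statistics read below.
 */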
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
        u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
        int err;

        MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
        err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *counter_id = MLX5_GET(alloc_q_counter_out, out,
                                       counter_set_id);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);

int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
        u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};

        MLX5_SET(dealloc_q_counter_in, in, opcode,
                 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
        MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
                                          sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);

int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
                              int reset, void *out, int out_size)
{
        u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

        MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
        MLX5_SET(query_q_counter_in, in, clear, reset);
        MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);

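/* Convenience wrapper that queries a Q counter and extracts only the
 * out_of_buffer field.
 */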
int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
                                  u32 *out_of_buffer)
{
        int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
        void *out;
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
        if (!err)
                *out_of_buffer = MLX5_GET(query_q_counter_out, out,
                                          out_of_buffer);

        /* mlx5_vzalloc() may fall back to vzalloc(), so free with kvfree(). */
        kvfree(out);
        return err;
}