drivers/net/ethernet/mellanox/mlx5/core/eq.c (mirror_ubuntu-jammy-kernel.git)
net/mlx5: Fix a race when moving command interface to events mode
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED = 0x9,
        MLX5_EQ_STATE_FIRED = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET = 0x40,
};

/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
 * the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
 * used to set the EQ size, budget must be smaller than the EQ size.
 */
enum {
        MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);

struct mlx5_eq_table {
        struct list_head        comp_eqs_list;
        struct mlx5_eq_async    pages_eq;
        struct mlx5_eq_async    cmd_eq;
        struct mlx5_eq_async    async_eq;

        struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

        /* Since CQ DB is stored in async_eq */
        struct mlx5_nb          cq_err_nb;

        struct mutex            lock; /* sync async eqs creations */
        int                     num_comp_eqs;
        struct mlx5_irq_table   *irq_table;
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *cq = NULL;

        rcu_read_lock();
        cq = radix_tree_lookup(&table->tree, cqn);
        if (likely(cq))
                mlx5_cq_hold(cq);
        rcu_read_unlock();

        return cq;
}

static int mlx5_eq_comp_int(struct notifier_block *nb,
                            __always_unused unsigned long action,
                            __always_unused void *data)
{
        struct mlx5_eq_comp *eq_comp =
                container_of(nb, struct mlx5_eq_comp, irq_nb);
        struct mlx5_eq *eq = &eq_comp->core;
        struct mlx5_eqe *eqe;
        int num_eqes = 0;
        u32 cqn = -1;

        eqe = next_eqe_sw(eq);
        if (!eqe)
                goto out;

        do {
                struct mlx5_core_cq *cq;

                /* Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();
                /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
                cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

                cq = mlx5_eq_cq_get(eq, cqn);
                if (likely(cq)) {
                        ++cq->arm_sn;
                        cq->comp(cq, eqe);
                        mlx5_cq_put(cq);
                } else {
                        dev_dbg_ratelimited(eq->dev->device,
                                            "Completion event for bogus CQ 0x%x\n", cqn);
                }

                ++eq->cons_index;

        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
        eq_update_ci(eq, 1);

        if (cqn != -1)
                tasklet_schedule(&eq_comp->tasklet_ctx.task);

        return 0;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
        u32 count_eqe;

        disable_irq(eq->core.irqn);
        count_eqe = eq->core.cons_index;
        mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
        count_eqe = eq->core.cons_index - count_eqe;
        enable_irq(eq->core.irqn);

        return count_eqe;
}

static int mlx5_eq_async_int(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        struct mlx5_eq_async *eq_async =
                container_of(nb, struct mlx5_eq_async, irq_nb);
        struct mlx5_eq *eq = &eq_async->core;
        struct mlx5_eq_table *eqt;
        struct mlx5_core_dev *dev;
        struct mlx5_eqe *eqe;
        int num_eqes = 0;

        dev = eq->dev;
        eqt = dev->priv.eq_table;

        eqe = next_eqe_sw(eq);
        if (!eqe)
                goto out;

        do {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
                atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

                ++eq->cons_index;

        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
        eq_update_ci(eq, 1);

        return 0;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
              struct mlx5_eq_param *param)
{
        struct mlx5_cq_table *cq_table = &eq->cq_table;
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        u8 vecidx = param->irq_index;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;
        int i;

        /* Init CQ table */
        memset(cq_table, 0, sizeof(*cq_table));
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
                MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

        for (i = 0; i < 4; i++)
                MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
                                 param->mask[i]);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        eq->vecidx = vecidx;
        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = pci_irq_vector(dev->pdev, vecidx);
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_eq;

        kvfree(in);
        return 0;

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to enable
 * @nb  : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                   struct notifier_block *nb)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
        if (!err)
                eq_update_ci(eq, 1);

        return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to disable
 * @nb  : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                     struct notifier_block *nb)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;

        mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        mlx5_debug_eq_remove(dev, eq);

        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);

        mlx5_buf_free(dev, &eq->buf);

        return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        int err;

        spin_lock(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock(&table->lock);

        return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *tmp;

        spin_lock(&table->lock);
        tmp = radix_tree_delete(&table->tree, cq->cqn);
        spin_unlock(&table->lock);

        if (!tmp) {
                mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
                              eq->eqn, cq->cqn);
                return;
        }

        if (tmp != cq)
                mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
                              eq->eqn, cq->cqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *eq_table;
        int i;

        eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
        if (!eq_table)
                return -ENOMEM;

        dev->priv.eq_table = eq_table;

        mlx5_eq_debugfs_init(dev);

        mutex_init(&eq_table->lock);
        for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
                ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

        eq_table->irq_table = dev->priv.irq_table;
        return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
        kvfree(dev->priv.eq_table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
                           struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        /* Async EQs must share irq index 0 */
        if (param->irq_index != 0) {
                err = -EINVAL;
                goto unlock;
        }

        err = create_map_eq(dev, eq, param);
unlock:
        mutex_unlock(&eq_table->lock);
        return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        err = destroy_unmap_eq(dev, eq);
        mutex_unlock(&eq_table->lock);
        return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
                                 unsigned long type, void *data)
{
        struct mlx5_eq_table *eqt;
        struct mlx5_core_cq *cq;
        struct mlx5_eqe *eqe;
        struct mlx5_eq *eq;
        u32 cqn;

        /* type == MLX5_EVENT_TYPE_CQ_ERROR */

        eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
        eq  = &eqt->async_eq.core;
        eqe = data;

        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
        mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                       cqn, eqe->data.cq_err.syndrome);

        cq = mlx5_eq_cq_get(eq, cqn);
        if (unlikely(!cq)) {
                mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
                return NOTIFY_OK;
        }

        if (cq->event)
                cq->event(cq, type);

        mlx5_cq_put(cq);

        return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
        __be64 *user_unaffiliated_events;
        __be64 *user_affiliated_events;
        int i;

        user_affiliated_events =
                MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
        user_unaffiliated_events =
                MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

        for (i = 0; i < 4; i++)
                mask[i] |= be64_to_cpu(user_affiliated_events[i] |
                                       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

        if (MLX5_VPORT_MANAGER(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");

        if (MLX5_PPS_CAP(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
        if (MLX5_CAP_GEN_MAX(dev, dct))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

        if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

        if (mlx5_eswitch_is_funcs_handler(dev))
                async_event_mask |=
                        (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

        mask[0] = async_event_mask;

        if (MLX5_CAP_GEN(dev, event_cap))
                gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
               struct mlx5_eq_param *param, const char *name)
{
        int err;

        eq->irq_nb.notifier_call = mlx5_eq_async_int;

        err = create_async_eq(dev, &eq->core, param);
        if (err) {
                mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
                return err;
        }
        err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
        if (err) {
                mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
                destroy_async_eq(dev, &eq->core);
        }
        return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
                             struct mlx5_eq_async *eq, const char *name)
{
        int err;

        mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
        err = destroy_async_eq(dev, &eq->core);
        if (err)
                mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
                              name, err);
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_param param = {};
        int err;

        MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
        mlx5_eq_notifier_register(dev, &table->cq_err_nb);

        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = MLX5_NUM_CMD_EQE,
                .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
        };
        mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
        err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
        if (err)
                goto err1;

        mlx5_cmd_use_events(dev);
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = MLX5_NUM_ASYNC_EQE,
        };

        gather_async_events_mask(dev, param.mask);
        err = setup_async_eq(dev, &table->async_eq, &param, "async");
        if (err)
                goto err2;

        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = /* TODO: sriov max_vf + */ 1,
                .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
        };

        err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
        if (err)
                goto err3;

        return 0;

err3:
        cleanup_async_eq(dev, &table->async_eq, "async");
err2:
        mlx5_cmd_use_polling(dev);
        cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
        return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;

        cleanup_async_eq(dev, &table->pages_eq, "pages");
        cleanup_async_eq(dev, &table->async_eq, "async");
        mlx5_cmd_use_polling(dev);
        cleanup_async_eq(dev, &table->cmd_eq, "cmd");
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
        return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
                       struct mlx5_eq_param *param)
{
        struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
        int err;

        if (!eq)
                return ERR_PTR(-ENOMEM);

        err = create_async_eq(dev, eq, param);
        if (err) {
                kvfree(eq);
                eq = ERR_PTR(err);
        }

        return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (IS_ERR(eq))
                return -EINVAL;

        err = destroy_async_eq(dev, eq);
        if (err)
                goto out;

        kvfree(eq);
out:
        return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
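
/*
 * Usage sketch (illustrative only, not part of this driver): a generic-EQ
 * consumer such as the RDMA ODP code referenced above would typically pair
 * mlx5_eq_create_generic() with mlx5_eq_enable()/mlx5_eq_disable() and
 * mlx5_eq_destroy_generic(). The names my_nb, my_eq_int() and MY_NUM_EQE
 * below are placeholders, not symbols provided by this API; the event mask
 * mirrors the page-fault use case mentioned in the comment above.
 *
 *      struct mlx5_eq_param param = {
 *              .irq_index = 0,
 *              .nent = MY_NUM_EQE,
 *              .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *      };
 *      struct notifier_block my_nb = { .notifier_call = my_eq_int };
 *      struct mlx5_eq *eq;
 *      int err;
 *
 *      eq = mlx5_eq_create_generic(dev, &param);
 *      if (IS_ERR(eq))
 *              return PTR_ERR(eq);
 *
 *      err = mlx5_eq_enable(dev, eq, &my_nb);
 *      if (err) {
 *              mlx5_eq_destroy_generic(dev, eq);
 *              return err;
 *      }
 *
 *      ...
 *
 *      mlx5_eq_disable(dev, eq, &my_nb);
 *      mlx5_eq_destroy_generic(dev, eq);
 */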

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
        u32 ci = eq->cons_index + cc;
        struct mlx5_eqe *eqe;

        eqe = get_eqe(eq, ci & (eq->nent - 1));
        eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
        /* Make sure we read EQ entry contents after we've
         * checked the ownership bit.
         */
        if (eqe)
                dma_rmb();

        return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val;

        eq->cons_index += cc;
        val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
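
/*
 * Polling sketch (illustrative only, not part of this driver): a generic-EQ
 * consumer's notifier callback typically drains newly owned EQEs with
 * mlx5_eq_get_eqe() and then publishes the new consumer index with
 * mlx5_eq_update_ci(), re-arming the EQ on the final update. my_eq_int(),
 * my_eq_from_nb() and handle_eqe() are placeholders, not part of this API.
 *
 *      static int my_eq_int(struct notifier_block *nb, unsigned long type,
 *                           void *data)
 *      {
 *              struct mlx5_eq *eq = my_eq_from_nb(nb);
 *              struct mlx5_eqe *eqe;
 *              int cc = 0;
 *
 *              while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *                      handle_eqe(eqe);
 *                      cc++;
 *              }
 *
 *              mlx5_eq_update_ci(eq, cc, true);
 *              return NOTIFY_OK;
 *      }
 */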

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
                mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
                if (destroy_unmap_eq(dev, &eq->core))
                        mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
                                       eq->core.eqn);
                tasklet_disable(&eq->tasklet_ctx.task);
                kfree(eq);
        }
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;
        int ncomp_eqs;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_eqs = table->num_comp_eqs;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_eqs; i++) {
                int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
                struct mlx5_eq_param param = {};

                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                INIT_LIST_HEAD(&eq->tasklet_ctx.list);
                INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
                spin_lock_init(&eq->tasklet_ctx.lock);
                tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
                             (unsigned long)&eq->tasklet_ctx);

                eq->irq_nb.notifier_call = mlx5_eq_comp_int;
                param = (struct mlx5_eq_param) {
                        .irq_index = vecidx,
                        .nent = nent,
                };
                err = create_map_eq(dev, &eq->core, &param);
                if (err) {
                        kfree(eq);
                        goto clean;
                }
                err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
                if (err) {
                        destroy_unmap_eq(dev, &eq->core);
                        kfree(eq);
                        goto clean;
                }

                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
                /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
                list_add_tail(&eq->list, &table->comp_eqs_list);
        }

        return 0;

clean:
        destroy_comp_eqs(dev);
        return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;
        int err = -ENOENT;
        int i = 0;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (i++ == vector) {
                        *eqn = eq->core.eqn;
                        *irqn = eq->core.irqn;
                        err = 0;
                        break;
                }
        }

        return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
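
/*
 * Usage sketch (illustrative only, not part of this driver): ULPs map a
 * completion vector to the EQN/IRQ of the matching completion EQ before
 * creating a CQ on that vector. "vector" is expected to be below
 * mlx5_comp_vectors_count(dev); the CQ-creation step is only indicated.
 *
 *      int eqn;
 *      unsigned int irqn;
 *      int err;
 *
 *      err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
 *      if (err)
 *              return err;
 *
 *      ... set the returned eqn in the CQ context of the CREATE_CQ command ...
 */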

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
        return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
        int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

        return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
                                          vecidx);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
        return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;

        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->core.eqn == eqn)
                        return eq;
        }

        return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;

        mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
        mlx5_irq_table_destroy(dev);
        mutex_unlock(&table->lock);
}

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        eq_table->num_comp_eqs =
                mlx5_irq_get_num_comp(eq_table->irq_table);

        err = create_async_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create async EQs\n");
                goto err_async_eqs;
        }

        err = create_comp_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create completion EQs\n");
                goto err_comp_eqs;
        }

        return 0;
err_comp_eqs:
        destroy_async_eqs(dev);
err_async_eqs:
        return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
        destroy_comp_eqs(dev);
        destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
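
/*
 * Usage sketch (illustrative only, not part of this driver): mlx5 consumers
 * subscribe to async events by embedding a struct mlx5_nb, initializing it
 * with MLX5_NB_INIT() for the event of interest, and registering it on the
 * per-event notifier chain, as done above for cq_err_nb. my_port_nb and
 * my_port_event() are placeholders, not symbols provided by this API.
 *
 *      static struct mlx5_nb my_port_nb;
 *
 *      static int my_port_event(struct notifier_block *nb,
 *                               unsigned long type, void *data)
 *      {
 *              struct mlx5_eqe *eqe = data;
 *
 *              ... handle MLX5_EVENT_TYPE_PORT_CHANGE via eqe->sub_type ...
 *              return NOTIFY_OK;
 *      }
 *
 *      MLX5_NB_INIT(&my_port_nb, my_port_event, PORT_CHANGE);
 *      mlx5_eq_notifier_register(dev, &my_port_nb);
 *      ...
 *      mlx5_eq_notifier_unregister(dev, &my_port_nb);
 */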