/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "fpga/core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif

enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
	MLX5_NUM_PF_DRAIN	= 64,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

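/* Base set of async events the async EQ subscribes to.  mlx5_start_eqs()
 * extends this mask with capability-dependent events (NIC vport change,
 * port module, PPS, FPGA error) before creating the async EQ.
 */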
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

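/* Peek at the EQE at the current consumer index.  Hardware writes the
 * ownership bit with an alternating value on each pass over the
 * power-of-two sized queue, so an entry belongs to software only when
 * its owner bit matches the parity of the current pass, tracked by
 * (cons_index & nent); otherwise there is no new event and NULL is
 * returned.
 */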
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	case MLX5_EVENT_TYPE_PPS_EVENT:
		return "MLX5_EVENT_TYPE_PPS_EVENT";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	default:
		return "Unrecognized event";
	}
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

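/* Update the EQ consumer index doorbell.  The 24-bit consumer index,
 * tagged with the EQ number, is written either to the arming register
 * (arm != 0), which also re-enables event reporting for this EQ, or two
 * dwords further in, which only acknowledges the consumed entries.
 */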
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

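/* On-demand paging support: page-fault EQEs are translated into
 * struct mlx5_pagefault work items, allocated from a dedicated mempool,
 * and handed to an ordered workqueue so that fault resolution, which may
 * sleep, never runs in hard-IRQ context.
 */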
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_eq *eq = pfault->eq;

	mlx5_core_page_fault(eq->dev, pfault);
	mempool_free(pfault, eq->pf_ctx.pool);
}

static void eq_pf_process(struct mlx5_eq *eq)
{
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->pf_ctx.work);
			break;
		}

		dma_rmb();
		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_core_dbg(dev,
			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			      eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				MLX5_24BIT_MASK;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				      pfault->type, pfault->token,
				      pfault->rdma.r_key);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				      pfault->rdma.rdma_op_len,
				      pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				MLX5_24BIT_MASK;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				      pfault->type, pfault->token,
				      pfault->wqe.wq_num,
				      pfault->wqe.wqe_index);
			break;

		default:
			mlx5_core_warn(dev,
				       "Unsupported page fault event sub-type: 0x%02hhx\n",
				       eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, eqe_pf_action);
		queue_work(eq->pf_ctx.wq, &pfault->work);

		++eq->cons_index;
		++set_ci;

		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);
}

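/* Hard-IRQ handler for the page-fault EQ.  The EQ is normally drained
 * right here under pf_ctx.lock; if the lock is already held (the worker
 * is draining), processing is deferred to eq_pf_action() instead of
 * spinning in interrupt context.
 */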
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
		eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
	} else {
		schedule_work(&eq->pf_ctx.work);
	}

	return IRQ_HANDLED;
}

/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}

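/* Deferred page-fault EQ work: top the mempool back up to its minimum
 * reserve with GFP_KERNEL allocations, then drain any EQEs the interrupt
 * handler left behind, this time waiting on pf_ctx.lock.
 */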
static void eq_pf_action(struct work_struct *work)
{
	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);

	mempool_refill(eq->pf_ctx.pool);

	spin_lock_irq(&eq->pf_ctx.lock);
	eq_pf_process(eq);
	spin_unlock_irq(&eq->pf_ctx.lock);
}

static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
{
	spin_lock_init(&pf_ctx->lock);
	INIT_WORK(&pf_ctx->work, eq_pf_action);

	pf_ctx->wq = alloc_ordered_workqueue(name,
					     WQ_MEM_RECLAIM);
	if (!pf_ctx->wq)
		return -ENOMEM;

	pf_ctx->pool = mempool_create_kmalloc_pool
		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
	if (!pf_ctx->pool)
		goto err_wq;

	return 0;
err_wq:
	destroy_workqueue(pf_ctx->wq);
	return -ENOMEM;
}

int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif

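/* Generic EQ hard-IRQ handler: walks software-owned EQEs and dispatches
 * them by type (CQ completions, QP/SRQ affiliated events, command
 * completions, port/vport changes, page requests, FPGA errors, ...),
 * periodically updating the consumer index so the HCA never sees the
 * queue as overflowed, and finally re-arms the EQ.
 */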
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

#ifdef CONFIG_MLX5_CORE_EN
		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;
#endif

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			if (dev->event)
				dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
			break;

		case MLX5_EVENT_TYPE_FPGA_ERROR:
			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq->tasklet_ctx.task);

	return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

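/* Create an EQ and hook it up to an interrupt vector: allocate and
 * initialize the EQE buffer, issue CREATE_EQ with the requested event
 * bitmask, request the MSI-X IRQ (using the page-fault handler for
 * MLX5_EQ_TYPE_PF EQs, the generic handler otherwise), register the EQ
 * with debugfs and leave it armed.
 */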
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type)
{
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	irq_handler_t handler;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	eq->type = type;
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF)
		handler = mlx5_eq_pf_int;
	else
#endif
		handler = mlx5_eq_int;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = priv->msix_arr[vecidx].vector;
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF) {
		err = init_pf_ctx(&eq->pf_ctx, name);
		if (err)
			goto err_irq;
	} else
#endif
	{
		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);
	}

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

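/* Tear down an EQ created by mlx5_create_map_eq(): remove it from
 * debugfs, free the IRQ, destroy the EQ in firmware, quiesce any
 * completion tasklet or page-fault context still referencing it, and
 * release the EQE buffer.
 */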
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(eq->irqn, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	if (eq->type == MLX5_EQ_TYPE_COMP) {
		tasklet_disable(&eq->tasklet_ctx.task);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	} else if (eq->type == MLX5_EQ_TYPE_PF) {
		cancel_work_sync(&eq->pf_ctx.work);
		destroy_workqueue(eq->pf_ctx.wq);
		mempool_destroy(eq->pf_ctx.pool);
#endif
	}
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
{
	return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
}

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = mlx5_eq_debugfs_init(dev);

	return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}

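/* Bring up the control-path EQs in dependency order: the command EQ
 * first (so the command interface can switch from polling to events),
 * then the async EQ with a capability-adjusted event mask, the pages EQ
 * for firmware page requests, and finally, when on-demand paging is
 * supported, the page-fault EQ.
 */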
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    MLX5_CAP_GEN(dev, vport_group_manager) &&
	    mlx5_core_is_pf(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_create_map_eq(dev, &table->pfault_eq,
					 MLX5_EQ_VEC_PFAULT,
					 MLX5_NUM_ASYNC_EQE,
					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
					 "mlx5_page_fault_eq",
					 MLX5_EQ_TYPE_PF);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
				       err);
			goto err3;
		}
	}

	return err;
err3:
	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
#else
	return err;
#endif

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}

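/* Destroy the control-path EQs in the reverse order of creation; the
 * command interface is switched back to polling before its EQ goes away,
 * and back to events if destroying the command EQ fails.
 */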
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
		if (err)
			return err;
	}
#endif

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);