/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif

enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
	MLX5_NUM_PF_DRAIN	= 64,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

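/* Events that every function always requests on its asynchronous EQ.
 * Optional events (NIC vport change, port module, PPS) are OR'ed into
 * this mask in mlx5_start_eqs() when the corresponding capability bit
 * is set.
 */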
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

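/* The EQE ownership bit alternates between hardware and software on
 * every pass over the ring: comparing the entry's owner bit with the
 * wrap parity of the consumer index (cons_index & nent, nent being a
 * power of two) tells whether HW has written this entry since we last
 * consumed it.  Returns NULL when no new EQE is available.
 */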
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	case MLX5_EVENT_TYPE_PPS_EVENT:
		return "MLX5_EVENT_TYPE_PPS_EVENT";
	default:
		return "Unrecognized event";
	}
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

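/* Write the 24-bit consumer index, tagged with the EQ number in the top
 * byte, to the EQ doorbell.  The doorbell at offset 0 also re-arms the
 * EQ so it raises another interrupt; the doorbell two dwords later only
 * updates the consumer index without re-arming.
 */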
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_eq *eq = pfault->eq;

	mlx5_core_page_fault(eq->dev, pfault);
	mempool_free(pfault, eq->pf_ctx.pool);
}

static void eq_pf_process(struct mlx5_eq *eq)
{
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->pf_ctx.work);
			break;
		}

		dma_rmb();
		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_core_dbg(dev,
			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			      eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				MLX5_24BIT_MASK;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				      pfault->type, pfault->token,
				      pfault->rdma.r_key);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				      pfault->rdma.rdma_op_len,
				      pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				MLX5_24BIT_MASK;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				      pfault->type, pfault->token,
				      pfault->wqe.wq_num,
				      pfault->wqe.wqe_index);
			break;

		default:
			mlx5_core_warn(dev,
				       "Unsupported page fault event sub-type: 0x%02hhx\n",
				       eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, eqe_pf_action);
		queue_work(eq->pf_ctx.wq, &pfault->work);

		++eq->cons_index;
		++set_ci;

		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);
}

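/* Interrupt handler for page-fault EQs.  The handler tries to drain the
 * EQ directly from hard-IRQ context: pagefault descriptors come from a
 * mempool pre-filled with MLX5_NUM_PF_DRAIN entries so they can be
 * allocated with GFP_ATOMIC.  If the lock is contended or the pool runs
 * dry, processing is deferred to pf_ctx.work, which refills the pool
 * from process context and drains the EQ under the same lock.
 */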
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
		eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
	} else {
		schedule_work(&eq->pf_ctx.work);
	}

	return IRQ_HANDLED;
}

/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}

static void eq_pf_action(struct work_struct *work)
{
	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);

	mempool_refill(eq->pf_ctx.pool);

	spin_lock_irq(&eq->pf_ctx.lock);
	eq_pf_process(eq);
	spin_unlock_irq(&eq->pf_ctx.lock);
}

static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
{
	spin_lock_init(&pf_ctx->lock);
	INIT_WORK(&pf_ctx->work, eq_pf_action);

	pf_ctx->wq = alloc_ordered_workqueue(name,
					     WQ_MEM_RECLAIM);
	if (!pf_ctx->wq)
		return -ENOMEM;

	pf_ctx->pool = mempool_create_kmalloc_pool
		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
	if (!pf_ctx->pool)
		goto err_wq;

	return 0;
err_wq:
	destroy_workqueue(pf_ctx->wq);
	return -ENOMEM;
}

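/* Typical usage (illustrative sketch; the real caller lives in the
 * mlx5_ib on-demand-paging code): once the faulted pages have been made
 * present, the consumer resumes the stalled queue with
 *
 *	mlx5_core_page_fault_resume(dev, pfault->token,
 *				    pfault->wqe.wq_num,
 *				    pfault->type, 0);
 *
 * Passing a non-zero 'error' reports that the fault could not be
 * resolved.
 */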
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif

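/* Generic EQ interrupt handler, used for completion EQs as well as the
 * command/async/pages EQs.  Completion events are dispatched to their
 * CQs and the CQ tasklet is kicked once after the loop; all other event
 * types are demultiplexed on eqe->type.
 */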
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

#ifdef CONFIG_MLX5_CORE_EN
		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;
#endif

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			if (dev->event)
				dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq->tasklet_ctx.task);

	return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

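/* Allocate the EQE buffer, create the EQ in hardware and hook its MSI-X
 * vector up to the right handler: page-fault EQs get mlx5_eq_pf_int and
 * a pagefault context, every other EQ gets mlx5_eq_int and a CQ tasklet
 * context.  The EQ is left armed on return.
 */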
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type)
{
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	irq_handler_t handler;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	eq->type = type;
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF)
		handler = mlx5_eq_pf_int;
	else
#endif
		handler = mlx5_eq_int;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = priv->msix_arr[vecidx].vector;
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF) {
		err = init_pf_ctx(&eq->pf_ctx, name);
		if (err)
			goto err_irq;
	} else
#endif
	{
		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);
	}

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(eq->irqn, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	if (eq->type == MLX5_EQ_TYPE_COMP) {
		tasklet_disable(&eq->tasklet_ctx.task);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	} else if (eq->type == MLX5_EQ_TYPE_PF) {
		cancel_work_sync(&eq->pf_ctx.work);
		destroy_workqueue(eq->pf_ctx.wq);
		mempool_destroy(eq->pf_ctx.pool);
#endif
	}
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
{
	return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
}

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = mlx5_eq_debugfs_init(dev);

	return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}

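/* Bring up the control-path EQs in dependency order: the command EQ
 * first (so the command interface can switch from polling to
 * event-driven completions), then the async EQ, the pages EQ and,
 * when on-demand paging is supported, the page fault EQ.
 */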
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    MLX5_CAP_GEN(dev, vport_group_manager) &&
	    mlx5_core_is_pf(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_CAP_GEN(dev, pps))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_create_map_eq(dev, &table->pfault_eq,
					 MLX5_EQ_VEC_PFAULT,
					 MLX5_NUM_ASYNC_EQE,
					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
					 "mlx5_page_fault_eq",
					 MLX5_EQ_TYPE_PF);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
				       err);
			goto err3;
		}
	}

	return err;
err3:
	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
#else
	return err;
#endif

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}

int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
		if (err)
			return err;
	}
#endif

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);