/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"
enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};
#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)
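
/*
 * These constants appear to pack into the 32-bit 'flags' dword of
 * struct mlx4_eq_context: status in the top nibble (bits 31:28),
 * ownership in bit 24, the event-coalescing (EC) and overrun-ignore
 * (OI) flags in bits 18:17, and the EQ state machine in bits 11:8.
 * mlx4_create_eq() below ORs MLX4_EQ_STATUS_OK | MLX4_EQ_STATE_ARMED
 * into that dword.
 */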
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

	return async_ev_mask;
}
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
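
/*
 * Illustrative doorbell encoding (made-up numbers, derived from the
 * code above): for cons_index = 0x1000005 and req_not = 1, the value
 * written is be32(0x80000005) -- the consumer index truncated to its
 * low 24 bits, with the request-notification flag in bit 31.
 */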
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
				u8 eqe_size)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;

	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
	 * strides of 64B,128B and 256B.
	 * When 64B EQE is used, the first (in the lower addresses)
	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
	 * contain the legacy EQE information.
	 * In all other cases, the first 32B contains the legacy EQE info.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
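
/*
 * Worked example for get_eqe() (illustrative values): with 4K pages,
 * 32-byte EQEs (eqe_size = 0x20, eqe_factor = 0) and nent = 512, entry
 * 130 gives offset = (130 & 511) * 32 = 4160, i.e. byte 64 of page 1.
 * With 64-byte EQEs (eqe_size = 0x40, eqe_factor = 1) the same entry
 * gives offset = 8320, landing at byte 160 of page 2 -- the extra 32
 * bytes skip the reserved first half of the 64-byte EQE.
 */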
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
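
/*
 * How the ownership test above works (explanatory note): hardware
 * toggles the meaning of the owner bit on every wrap of the queue, and
 * the driver tracks the polarity it expects via the wrap bit of
 * cons_index (the eq->nent bit; nent is a power of two).  E.g. with
 * nent = 512, on the first pass (cons_index & 512) == 0, so an EQE
 * whose owner bit is set (0x80) is still hardware-owned and NULL is
 * returned; after one wrap the polarity flips and the same owner value
 * means the EQE is ready for software.
 */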
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}
void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i, phys_port, slave_port;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i <= dev->persist->num_vfs; i++) {
				phys_port = 0;
				if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
				    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
					phys_port  = eqe->event.port_mgmt_change.port;
					slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
					if (slave_port < 0) /* VF doesn't have this port */
						continue;
					eqe->event.port_mgmt_change.port = slave_port;
				}
				if (mlx4_GEN_EQE(dev, i, eqe))
					mlx4_warn(dev, "Failed to generate event for slave %d\n",
						  i);
				if (phys_port)
					eqe->event.port_mgmt_change.port = phys_port;
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
		++slave_eq->cons;
	}
}
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	dma_wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}
static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return;

	slave_event(dev, slave, eqe);
}
#if defined(CONFIG_SMP)
static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
{
	int hint_err;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_eq *eq = &priv->eq_table.eq[vec];

	if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
		return;

	hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
	if (hint_err)
		mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
}
#endif
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;
	u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(slave_port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);
static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}
static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->persist->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}
/**************************************************************************
	The function gets as input the new event for that port,
	and changes the slave's port state according to the previous state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = 0;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);
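
/*
 * A summary of the transition table implemented by the switch above
 * (abbreviated names for readability):
 *
 *	current           event                     next              *gen_event
 *	SLAVE_PORT_DOWN   DEV_EVENT_PORT_UP         SLAVE_PENDING_UP  NONE
 *	SLAVE_PENDING_UP  DEV_EVENT_PORT_DOWN       SLAVE_PORT_DOWN   NONE
 *	SLAVE_PENDING_UP  ..._EVENT_GID_VALID       SLAVE_PORT_UP     UP
 *	SLAVE_PORT_UP     DEV_EVENT_PORT_DOWN       SLAVE_PORT_DOWN   DOWN
 *	SLAVE_PORT_UP     IB_EVENT_GID_INVALID      SLAVE_PENDING_UP  DOWN
 */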
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);
			/* In case of 'Reset flow' FLR can be generated for
			 * a slave before mlx4_load_one is done.
			 * make sure interface is up before trying to delete
			 * slave resources which weren't allocated yet.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_UP)
				mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW: */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn = -1;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;
	int eqe_size = dev->caps.eqe_size;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				 __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				if (eqe->type ==
				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
						  __func__, slave,
						  be32_to_cpu(eqe->event.srq.srqn),
						  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					if (eqe->type ==
					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
							  __func__, eqe->type,
							  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->persist->num_vfs + 1;
				     i++) {
					if (!test_bit(i, slaves_port.slaves))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state, then do not send port_down event */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0;
					     i < dev->persist->num_vfs + 1;
					     i++) {
						if (!test_bit(i, slaves_port.slaves))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context
			 * working in deferred task
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
					    flr_slave);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
			switch (eqe->subtype) {
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
				mlx4_warn(dev, "Bad cable detected on port %u\n",
					  eqe->event.bad_cable.port);
				break;
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
				mlx4_warn(dev, "Unsupported cable detected\n");
				break;
			default:
				mlx4_dbg(dev,
					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
					 eqe->type, eqe->subtype, eq->eqn,
					 eq->cons_index, eqe->owner, eq->nent,
					 !!(eqe->owner & 0x80) ^
					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
				break;
			}
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	/* cqn is 24bit wide but is initialized such that its higher bits
	 * are ones too. Thus, if we got any event, cqn's high bits should be off
	 * and we need to schedule the tasklet.
	 */
	if (!(cqn & ~0xffffff))
		tasklet_schedule(&eq->tasklet_ctx.task);

	return eqes_found;
}
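
/*
 * Rough arithmetic behind the spare-EQE scheme (illustrative): the
 * async EQ is created below with MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE
 * = 0x180 entries, and mlx4_eq_int() refreshes the consumer index after
 * every MLX4_NUM_SPARE_EQE = 0x80 events, so the hardware always sees
 * at least the spare headroom before it could declare an overflow.
 */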
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
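
/*
 * The MAP_EQ in_modifier layout implied by the callers here: bit 31
 * selects map (0) vs. unmap (1), and the low bits carry the EQ number.
 * mlx4_MAP_EQ_wrapper() above decodes the same word on the slave path
 * with 'eqn = in_modifier & 0x3FF' and 'in_modifier >> 31'.
 */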
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}
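
/*
 * Worked example (made-up numbers): with reserved_eqs = 24 and
 * num_comp_vectors = 33, the EQNs in use span 24..57, i.e. UAR pages
 * 6..14, and the expression yields (33 + 1 + 24)/4 - 24/4 + 1 = 9.
 */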
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
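
/*
 * Doorbell address arithmetic (explanatory note): each UAR page holds
 * the doorbells of four consecutive EQs at page offset 0x800, 8 bytes
 * apart, so e.g. EQN 26 lands in UAR page 26/4 = 6 at offset
 * 0x800 + 8 * 2 = 0x810.
 */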
static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int index;
	int err = -ENOMEM;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B,128B and 256B.
	 */
	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
							  pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
		     (unsigned long)&eq->tasklet_ctx);

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B,128B and 256B
	 */
	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

	err = mlx4_HW2SW_EQ(dev, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	synchronize_irq(eq->irq);
	tasklet_disable(&eq->tasklet_ctx.task);

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->persist->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_cpumask_var(eq_table->eq[i].affinity_mask);
#if defined(CONFIG_SMP)
			irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
#endif
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	kfree(eq_table->irq_names);
}
static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
				 priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}
static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap,
			       roundup_pow_of_two(dev->caps.num_eqs),
			       dev->caps.num_eqs - 1,
			       dev->caps.reserved_eqs,
			       roundup_pow_of_two(dev->caps.num_eqs) -
			       dev->caps.num_eqs);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
		if (i == MLX4_EQ_ASYNC) {
			err = mlx4_create_eq(dev,
					     MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
					     0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
		} else {
			struct mlx4_eq	*eq = &priv->eq_table.eq[i];
#ifdef CONFIG_RFS_ACCEL
			int port = find_first_bit(eq->actv_ports.ports,
						  dev->caps.num_ports) + 1;

			if (port <= dev->caps.num_ports) {
				struct mlx4_port_info *info =
					&mlx4_priv(dev)->port[port];

				if (!info->rmap) {
					info->rmap = alloc_irq_cpu_rmap(
						mlx4_get_eqs_per_port(dev, port));
					if (!info->rmap) {
						mlx4_warn(dev, "Failed to allocate cpu rmap\n");
						err = -ENOMEM;
						goto err_out_unmap;
					}
				}

				err = irq_cpu_rmap_add(
					info->rmap, eq->irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = mlx4_create_eq(dev, dev->caps.num_cqs -
						  dev->caps.reserved_cqs +
						  MLX4_NUM_SPARE_EQE,
					     (dev->flags & MLX4_FLAG_MSI_X) ?
					     i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
					     eq);
		}
		if (err)
			goto err_out_unmap;
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		snprintf(priv->eq_table.irq_names +
			 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE,
			 "mlx4-async@pci:%s",
			 pci_name(dev->persist->pdev));
		eq_name = priv->eq_table.irq_names +
			MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;

		err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
				  mlx4_msi_x_interrupt, 0, eq_name,
				  priv->eq_table.eq + MLX4_EQ_ASYNC);
		if (err)
			goto err_out_unmap;

		priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->persist->pdev));
		err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_unmap;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	/* arm ASYNC eq */
	eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);

	return 0;

err_out_unmap:
	while (i >= 0)
		mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

err_out_clr_int:
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}
*dev
)
1319 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1322 mlx4_MAP_EQ(dev
, get_async_ev_mask(dev
), 1,
1323 priv
->eq_table
.eq
[MLX4_EQ_ASYNC
].eqn
);
1325 #ifdef CONFIG_RFS_ACCEL
1326 for (i
= 1; i
<= dev
->caps
.num_ports
; i
++) {
1327 if (mlx4_priv(dev
)->port
[i
].rmap
) {
1328 free_irq_cpu_rmap(mlx4_priv(dev
)->port
[i
].rmap
);
1329 mlx4_priv(dev
)->port
[i
].rmap
= NULL
;
1333 mlx4_free_irqs(dev
);
1335 for (i
= 0; i
< dev
->caps
.num_comp_vectors
+ 1; ++i
)
1336 mlx4_free_eq(dev
, &priv
->eq_table
.eq
[i
]);
1338 if (!mlx4_is_slave(dev
))
1339 mlx4_unmap_clr_int(dev
);
1341 mlx4_unmap_uar(dev
);
1342 mlx4_bitmap_cleanup(&priv
->eq_table
.bitmap
);
1344 kfree(priv
->eq_table
.uar_map
);
/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors: for each vector we check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Make sure request_irq was called */
		if (!priv->eq_table.eq[i].have_irq)
			continue;

		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
	    (vector == MLX4_EQ_ASYNC))
		return false;

	return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
}
EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned int i;
	unsigned int sum = 0;

	for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
		sum += !!test_bit(port - 1,
				  priv->eq_table.eq[i].actv_ports.ports);

	return sum;
}
EXPORT_SYMBOL(mlx4_get_eqs_per_port);
int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
		return -EINVAL;

	return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
				dev->caps.num_ports) > 1);
}
EXPORT_SYMBOL(mlx4_is_eq_shared);
struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
	return mlx4_priv(dev)->port[port].rmap;
}
EXPORT_SYMBOL(mlx4_get_cpu_rmap);
int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = 0, i = 0;
	u32 min_ref_count_val = (u32)-1;
	int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
	int *prequested_vector = NULL;

	mutex_lock(&priv->msix_ctl.pool_lock);
	if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
	    (requested_vector >= 0) &&
	    (requested_vector != MLX4_EQ_ASYNC)) {
		if (test_bit(port - 1,
			     priv->eq_table.eq[requested_vector].actv_ports.ports)) {
			prequested_vector = &requested_vector;
		} else {
			struct mlx4_eq *eq;

			for (i = 1; i < port;
			     requested_vector += mlx4_get_eqs_per_port(dev, i++))
				;

			eq = &priv->eq_table.eq[requested_vector];
			if (requested_vector < dev->caps.num_comp_vectors + 1 &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				prequested_vector = &requested_vector;
			}
		}
	}

	if (!prequested_vector) {
		requested_vector = -1;
		for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
		     i++) {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];

			if (min_ref_count_val > eq->ref_count &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				min_ref_count_val = eq->ref_count;
				requested_vector = i;
			}
		}

		if (requested_vector < 0) {
			err = -ENOSPC;
			goto err_unlock;
		}

		prequested_vector = &requested_vector;
	}

	if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
	    dev->flags & MLX4_FLAG_MSI_X) {
		set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
		snprintf(priv->eq_table.irq_names +
			 *prequested_vector * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
			 *prequested_vector, dev_name(&dev->persist->pdev->dev));

		err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
				  mlx4_msi_x_interrupt, 0,
				  &priv->eq_table.irq_names[*prequested_vector << 5],
				  priv->eq_table.eq + *prequested_vector);

		if (err) {
			clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
			*prequested_vector = -1;
		} else {
#if defined(CONFIG_SMP)
			mlx4_set_eq_affinity_hint(priv, *prequested_vector);
#endif
			eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
			priv->eq_table.eq[*prequested_vector].have_irq = 1;
		}
	}

	if (!err && *prequested_vector >= 0)
		priv->eq_table.eq[*prequested_vector].ref_count++;

err_unlock:
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (!err && *prequested_vector >= 0)
		*vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
	else
		*vector = 0;

	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
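
/*
 * Typical caller pattern (a hedged sketch; consumers such as mlx4_en
 * follow roughly this sequence, details may differ):
 *
 *	err = mlx4_assign_eq(dev, port, &vector);
 *	if (err)
 *		...			fall back to the default/shared EQ
 *	irq = mlx4_eq_get_irq(dev, vector);
 *	...
 *	mlx4_release_eq(dev, vector);	drops the ref_count taken above
 */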
int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);

	mutex_lock(&priv->msix_ctl.pool_lock);
	priv->eq_table.eq[eq_vec].ref_count--;

	/* once we allocated EQ, we don't release it because it might be bound
	 * to cpu_rmap.
	 */
	mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);