/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);

	return async_ev_mask;
}

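/*
 * Usage note (illustrative): this mask is what the init/cleanup paths
 * below hand to the MAP_EQ firmware command to (un)subscribe the async
 * EQ, e.g.
 *
 *	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, eq->eqn);
 *
 * Each set bit enables delivery of one MLX4_EVENT_TYPE_* on that EQ.
 */
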
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

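/*
 * Doorbell layout written above, read off the expression itself: bits
 * [23:0] carry the 24-bit consumer index and bit 31 is the
 * request-notification flag, so eq_set_ci(eq, 1) both acknowledges the
 * consumed EQEs and re-arms the EQ in a single 32-bit write.
 */
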
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

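/*
 * Sketch of the two masks at work, assuming nent = 0x100 (always a
 * power of two here): the consumer index free-runs, get_eqe() keeps
 * only the low bits, and the nent bit flips once per lap:
 *
 *	cons_index = 0x0ff -> entry 0xff, lap bit (0x0ff & 0x100) == 0
 *	cons_index = 0x100 -> entry 0x00, lap bit != 0
 *	cons_index = 0x200 -> entry 0x00, lap bit == 0 again
 *
 * Comparing that lap parity against the EQE ownership bit is what lets
 * next_eqe_sw() tell a fresh entry from a stale one left over from the
 * previous sweep, with no shared head/tail pointer.
 */
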
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	     eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (i != dev->caps.function &&
				    master->slave_state[i].active)
					if (mlx4_GEN_EQE(dev, i, eqe))
						mlx4_warn(dev, "Failed to generate event "
							  "for slave %d\n", i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event "
					  "for slave %d\n", slave);
		}
		++slave_eq->cons;
	}
}

static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
			  "No free EQE on slave events queue\n", slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

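/*
 * The producer/consumer pairing above, spelled out (illustrative):
 * slave_event() stamps the owner byte 0x80 on even laps of ->prod and
 * 0x0 on odd laps, while next_slave_event_eqe() accepts an entry only
 * when that bit disagrees with the lap parity of ->cons.  A zeroed,
 * never-written entry therefore always looks "not ready" on lap 0,
 * and each lap flips which owner value means "ready".
 */
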
static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave =
		&priv->mfunc.master.slave_state[slave];

	if (!s_slave->active) {
		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
		return;
	}

	slave_event(dev, slave, eqe);
}

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->num_vfs < slave)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->num_vfs < slave)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;

	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;

	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;

	for (i = 0; i < dev->num_slaves; i++)
		set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
}

/**************************************************************************
	The function takes as input the new event for the port and,
	according to the previous state, changes the slave's port state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP,
		MLX4_PORT_STATE_IB_EVENT_GID_VALID,
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: "
		       "slave:%d, port:%d\n", __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

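/*
 * Transition summary for the switch above (derived from the code):
 *
 *	current		event			next		gen_event
 *	PORT_DOWN	DEV PORT_UP		PENDING_UP	NONE
 *	PENDING_UP	DEV PORT_DOWN		PORT_DOWN	NONE
 *	PENDING_UP	IB  GID_VALID		PORT_UP		UP
 *	PORT_UP		DEV PORT_DOWN		PORT_DOWN	DOWN
 *	PORT_UP		IB  GID_INVALID		PENDING_UP	DOWN
 *
 * Any other (state, event) pair leaves the state untouched.
 */
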
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: "
				 "clean slave: %d\n", i);

			mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock(&priv->mfunc.master.slave_state_lock);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on "
					  "FLR done (slave:%d)\n", i);
		}
	}
}

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				  __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) "
						  "on EQ %d at index %u: could"
						  " not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
					  " event: %02x(%02x)\n", __func__,
					  slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event "
						  "%02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->num_slaves; i++) {
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
							 " to slave: %d, port:%d\n",
							 __func__, i, port);
						mlx4_slave_event(dev, i, eqe);
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state, then do not send port_down event */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0; i < dev->num_slaves; i++) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				else /* IB port */
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event "
					  "for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received "
					  "FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock(&priv->mfunc.master.slave_state_lock);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending "
							 "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
							 " to slave: %d\n", __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! "
					 "Threshold: %d degrees Celsius; "
					 "Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
					  "subtype %02x on EQ %d at index %u. owner=%x, "
					  "nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
				  "index %u. owner=%x, nent=0x%x, slave=%x, "
				  "ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

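/*
 * Why MLX4_NUM_SPARE_EQE works as the refresh threshold (sketch): with
 * MLX4_NUM_SPARE_EQE = 0x80, the loop above publishes its consumer
 * index after at most 128 EQEs, and every EQ was created with those
 * 128 extra entries of headroom, so the hardware producer can never
 * lap a consumer that is at most 128 entries behind its last published
 * position.
 */
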
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x1FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
		       int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

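/*
 * Encoding note (mirrors the wrapper above): the MAP_EQ input modifier
 * carries the EQ number in its low bits and the unmap flag in bit 31,
 * which is why mlx4_MAP_EQ_wrapper() recovers them with
 * "in_modifier & 0x1FF" and "in_modifier >> 31".
 */
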
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool) / 4 - dev->caps.reserved_eqs / 4 + 1;
}

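/*
 * Worked example (hypothetical numbers): with reserved_eqs = 16,
 * num_comp_vectors = 4 and comp_pool = 0, the highest EQN in use is
 * 16 + 4 = 20, so (4 + 1 + 16 + 0)/4 - 16/4 + 1 = 5 - 4 + 1 = 2 UARs
 * cover the doorbells for EQNs 16..23.
 */
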
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

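/*
 * Offset example (continuing the hypothetical numbers above): EQN 17
 * with reserved_eqs = 16 lands in uar_map[17/4 - 16/4] = uar_map[0],
 * and its doorbell sits at 0x800 + 8 * (17 % 4) = 0x808 within that
 * page: four 8-byte doorbells per UAR page, starting at 0x800.
 */
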
static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}

static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

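/*
 * Sizing example for the allocation above: asking for nent = 1000
 * rounds up to eq->nent = 1024, and with MLX4_EQ_ENTRY_SIZE = 0x20
 * (32 bytes) that is 32 KB of EQE space, i.e. npages = 8 on a 4 KB
 * page system.
 */
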
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Freeing the assigned IRQs:
		 * all bits should be 0, but we need to validate.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* no need for locking here */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* if the additional completion vector pool size is 0 this loop will not run */
	for (i = dev->caps.num_comp_vectors + 1;
	     i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors: for each vector we check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command.
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
		   int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	mutex_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
				 vec * MLX4_IRQNAME_SIZE,
				 MLX4_IRQNAME_SIZE, "%s", name);
#ifdef CONFIG_RFS_ACCEL
			if (rmap) {
				err = irq_cpu_rmap_add(rmap,
						       priv->eq_table.eq[vec].irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec << 5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* zero out the bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				continue;
				/* we don't want to break here */
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

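/*
 * Typical pairing, as a hedged usage sketch (caller names are
 * illustrative, not taken from this file):
 *
 *	int vec;
 *	err = mlx4_assign_eq(dev, "my-queue-0", NULL, &vec);
 *	...
 *	mlx4_release_eq(dev, vec);
 *
 * On success *vector holds an index past the legacy completion
 * vectors (num_comp_vectors + 1 + pool slot); mlx4_release_eq()
 * below maps it back to the pool bitmap slot before freeing.
 */
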
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* bitmap index */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/* sanity check, making sure we're not trying to free IRQs
		 * belonging to a legacy EQ
		 */
		mutex_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		mutex_unlock(&priv->msix_ctl.pool_lock);
	}
}
EXPORT_SYMBOL(mlx4_release_eq);