/* drivers/net/ethernet/mellanox/mlx4/eq.c (mirror_ubuntu-zesty-kernel.git, Ubuntu-4.10.0-37.41) */
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
        MLX4_IRQNAME_SIZE       = 32
};

enum {
        MLX4_NUM_ASYNC_EQE      = 0x100,
        MLX4_NUM_SPARE_EQE      = 0x80,
        MLX4_EQ_ENTRY_SIZE      = 0x20
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD)                | \
                               (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)        | \
                               (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
                               (1ull << MLX4_EVENT_TYPE_FLR_EVENT)          | \
                               (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
        u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
                async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

        return async_ev_mask;
}

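/* Ring the EQ consumer-index doorbell. As the write below shows, bits
 * 0-23 carry the low 24 bits of cons_index and bit 31 carries the
 * request-notification flag; the word is byte-swapped to big-endian
 * for the device.
 */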
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

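/* A worked example of the index math in get_eqe(), assuming 4 KB pages:
 * with 32-byte EQEs (eqe_size = 0x20) and a 256-entry EQ, entry 130 maps
 * to offset (130 & 255) * 32 = 4160, i.e. byte 64 of page 1. When
 * eqe_factor is set (64-byte EQEs carrying the legacy EQE in their upper
 * half), MLX4_EQ_ENTRY_SIZE (32) is added to skip the reserved half.
 */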
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
                                u8 eqe_size)
{
        /* (entry & (eq->nent - 1)) gives us a cyclic array */
        unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;
        /* CX3 is capable of extending the EQE from 32 to 64 bytes with
         * strides of 64B, 128B and 256B.
         * When a 64B EQE is used, the first 32 bytes (in the lower
         * addresses) of the 64-byte EQE are reserved and the next 32 bytes
         * contain the legacy EQE information.
         * In all other cases, the first 32B contain the legacy EQE info.
         */
        return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}

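/* Ownership check: the owner bit written into each EQE toggles on every
 * pass through the ring, so XOR-ing it with the wrap-count parity of the
 * consumer index (the eq->nent bit) tells software whether the entry was
 * freshly written on the current pass or is stale from the previous one.
 */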
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
        struct mlx4_eqe *eqe =
                &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
        return (!!(eqe->owner & 0x80) ^
                !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
                eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
        struct mlx4_mfunc_master_ctx *master =
                container_of(work, struct mlx4_mfunc_master_ctx,
                             slave_event_work);
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
        struct mlx4_eqe *eqe;
        u8 slave;
        int i, phys_port, slave_port;

        for (eqe = next_slave_event_eqe(slave_eq); eqe;
             eqe = next_slave_event_eqe(slave_eq)) {
                slave = eqe->slave_id;

                if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
                    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
                    mlx4_is_bonded(dev)) {
                        struct mlx4_port_cap port_cap;

                        if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
                                goto consume;

                        if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
                                goto consume;
                }
                /* All active slaves need to receive the event */
                if (slave == ALL_SLAVES) {
                        for (i = 0; i <= dev->persist->num_vfs; i++) {
                                phys_port = 0;
                                if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
                                    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
                                        phys_port = eqe->event.port_mgmt_change.port;
                                        slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
                                        if (slave_port < 0) /* VF doesn't have this port */
                                                continue;
                                        eqe->event.port_mgmt_change.port = slave_port;
                                }
                                if (mlx4_GEN_EQE(dev, i, eqe))
                                        mlx4_warn(dev, "Failed to generate event for slave %d\n",
                                                  i);
                                if (phys_port)
                                        eqe->event.port_mgmt_change.port = phys_port;
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
                                mlx4_warn(dev, "Failed to generate event for slave %d\n",
                                          slave);
                }
consume:
                ++slave_eq->cons;
        }
}

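/* Post an event on the master's software-emulated slave event queue and
 * kick the worker that delivers it via GEN_EQE. The producer reuses the
 * hardware EQ ownership scheme: if the next slot's owner bit still
 * differs from the producer's wrap parity, that slot holds an unconsumed
 * event, so the queue is full and the event is dropped with a warning.
 */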
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
        struct mlx4_eqe *s_eqe;
        unsigned long flags;

        spin_lock_irqsave(&slave_eq->event_lock, flags);
        s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
        if ((!!(s_eqe->owner & 0x80)) ^
            (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
                mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
                          slave);
                spin_unlock_irqrestore(&slave_eq->event_lock, flags);
                return;
        }

        memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
        s_eqe->slave_id = slave;
        /* ensure all information is written before setting the ownership bit */
        dma_wmb();
        s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
        ++slave_eq->prod;

        queue_work(priv->mfunc.master.comm_wq,
                   &priv->mfunc.master.slave_event_work);
        spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
                             struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        if (slave < 0 || slave > dev->persist->num_vfs ||
            slave == dev->caps.function ||
            !priv->mfunc.master.slave_state[slave].active)
                return;

        slave_event(dev, slave, eqe);
}

#if defined(CONFIG_SMP)
static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
{
        int hint_err;
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_eq *eq = &priv->eq_table.eq[vec];

        if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
                return;

        hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
        if (hint_err)
                mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
}
#endif

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
        struct mlx4_eqe eqe;

        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

        if (!s_slave->active)
                return 0;

        memset(&eqe, 0, sizeof eqe);

        eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
        eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
        eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

        return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
        struct mlx4_eqe eqe;

        /* don't send if we don't have that slave */
        if (dev->persist->num_vfs < slave)
                return 0;
        memset(&eqe, 0, sizeof eqe);

        eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
        eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
        eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

        return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
                                   u8 port_subtype_change)
{
        struct mlx4_eqe eqe;
        u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);

        /* don't send if we don't have that slave */
        if (dev->persist->num_vfs < slave)
                return 0;
        memset(&eqe, 0, sizeof eqe);

        eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
        eqe.subtype = port_subtype_change;
        eqe.event.port_change.port = cpu_to_be32(slave_port << 28);

        mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
                 port_subtype_change, slave, port);
        return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

        if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
            port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
                pr_err("%s: Error: asking for slave:%d, port:%d\n",
                       __func__, slave, port);
                return SLAVE_PORT_DOWN;
        }
        return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
                                     enum slave_port_state state)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

        if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
            port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
                pr_err("%s: Error: asking for slave:%d, port:%d\n",
                       __func__, slave, port);
                return -1;
        }
        s_state[slave].port_state[port] = state;

        return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
        int i;
        enum slave_port_gen_event gen_event;
        struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
                                                                          port);

        for (i = 0; i < dev->persist->num_vfs + 1; i++)
                if (test_bit(i, slaves_pport.slaves))
                        set_and_calc_slave_port_state(dev, i, port,
                                                      event, &gen_event);
}
/**************************************************************************
        The function gets as input the new event for that port,
        and changes the slave's port state according to the previous state.
        The events are:
                MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
                MLX4_PORT_STATE_DEV_EVENT_PORT_UP
                MLX4_PORT_STATE_IB_EVENT_GID_VALID
                MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
                                  u8 port, int event,
                                  enum slave_port_gen_event *gen_event)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *ctx = NULL;
        unsigned long flags;
        int ret = -1;
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
        enum slave_port_state cur_state =
                mlx4_get_slave_port_state(dev, slave, port);

        *gen_event = SLAVE_PORT_GEN_EVENT_NONE;

        if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
            port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
                pr_err("%s: Error: asking for slave:%d, port:%d\n",
                       __func__, slave, port);
                return ret;
        }

        ctx = &priv->mfunc.master.slave_state[slave];
        spin_lock_irqsave(&ctx->lock, flags);

        switch (cur_state) {
        case SLAVE_PORT_DOWN:
                if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
                        mlx4_set_slave_port_state(dev, slave, port,
                                                  SLAVE_PENDING_UP);
                break;
        case SLAVE_PENDING_UP:
                if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
                        mlx4_set_slave_port_state(dev, slave, port,
                                                  SLAVE_PORT_DOWN);
                else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
                        mlx4_set_slave_port_state(dev, slave, port,
                                                  SLAVE_PORT_UP);
                        *gen_event = SLAVE_PORT_GEN_EVENT_UP;
                }
                break;
        case SLAVE_PORT_UP:
                if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
                        mlx4_set_slave_port_state(dev, slave, port,
                                                  SLAVE_PORT_DOWN);
                        *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
                } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
                           event) {
                        mlx4_set_slave_port_state(dev, slave, port,
                                                  SLAVE_PENDING_UP);
                        *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
                }
                break;
        default:
                pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
                       __func__, slave, port);
                goto out;
        }
        ret = mlx4_get_slave_port_state(dev, slave, port);

out:
        spin_unlock_irqrestore(&ctx->lock, flags);
        return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
        struct mlx4_eqe eqe;

        memset(&eqe, 0, sizeof eqe);

        eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
        eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
        eqe.event.port_mgmt_change.port = port;
        eqe.event.port_mgmt_change.params.port_info.changed_attr =
                cpu_to_be32((u32) attr);

        slave_event(dev, ALL_SLAVES, &eqe);
        return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
        struct mlx4_mfunc_master_ctx *master =
                container_of(work, struct mlx4_mfunc_master_ctx,
                             slave_flr_event_work);
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv =
                container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int i;
        int err;
        unsigned long flags;

        mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

        for (i = 0 ; i < dev->num_slaves; i++) {

                if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
                        mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
                                 i);
                        /* In case of 'Reset flow' an FLR can be generated for
                         * a slave before mlx4_load_one is done.
                         * Make sure the interface is up before trying to
                         * delete slave resources which weren't allocated yet.
                         */
                        if (dev->persist->interface_state &
                            MLX4_INTERFACE_STATE_UP)
                                mlx4_delete_all_resources_for_slave(dev, i);
                        /* return the slave to running mode */
                        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
                        slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
                        slave_state[i].is_slave_going_down = 0;
                        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
                        /* notify the FW */
                        err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
                                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                        if (err)
                                mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
                                          i);
                }
        }
}

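/* Drain one EQ: walk the software-owned EQEs, dispatch each by type (CQ
 * completions, QP/SRQ/CQ errors, port and port-management changes,
 * command completions, comm-channel and FLR events), update the consumer
 * index at least every MLX4_NUM_SPARE_EQE entries, and rearm the EQ on
 * exit.
 */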
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_eqe *eqe;
        int cqn = -1;
        int eqes_found = 0;
        int set_ci = 0;
        int port;
        int slave = 0;
        int ret;
        u32 flr_slave;
        u8 update_slave_state;
        int i;
        enum slave_port_gen_event gen_event;
        unsigned long flags;
        struct mlx4_vport_state *s_info;
        int eqe_size = dev->caps.eqe_size;

        while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_dbg(dev, "event %d arrived\n", eqe->type);
                        if (mlx4_is_master(dev)) {
                                /* forward only to slave owning the QP */
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                                      RES_QP,
                                                                      be32_to_cpu(eqe->event.qp.qpn)
                                                                      & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
                                }

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }

                        }
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
                                      0xffffff, eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                        mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
                                 __func__, be32_to_cpu(eqe->event.srq.srqn),
                                 eq->eqn);
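                        /* fall through */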
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        if (mlx4_is_master(dev)) {
                                /* forward only to slave owning the SRQ */
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                                      RES_SRQ,
                                                                      be32_to_cpu(eqe->event.srq.srqn)
                                                                      & 0xffffff,
                                                                      &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                  eqe->type, eqe->subtype,
                                                  eq->eqn, eq->cons_index, ret);
                                        break;
                                }
                                if (eqe->type ==
                                    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
                                        mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
                                                  __func__, slave,
                                                  be32_to_cpu(eqe->event.srq.srqn),
                                                  eqe->type, eqe->subtype);

                                if (!ret && slave != dev->caps.function) {
                                        if (eqe->type ==
                                            MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
                                                mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
                                                          __func__, eqe->type,
                                                          eqe->subtype, slave);
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
                                       0xffffff, eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE: {
                        struct mlx4_slaves_pport slaves_port;
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        slaves_port = mlx4_phys_to_slaves_pport(dev, port);
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                                if (!mlx4_is_master(dev))
                                        break;
                                for (i = 0; i < dev->persist->num_vfs + 1;
                                     i++) {
                                        int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

                                        if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
                                                continue;
                                        if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
                                                mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
                                                         __func__, i, port);
                                                s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
                                                if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
                                                        eqe->event.port_change.port =
                                                                cpu_to_be32(
                                                                (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
                                                                | (reported_port << 28));
                                                        mlx4_slave_event(dev, i, eqe);
                                                }
                                        } else {  /* IB port */
                                                set_and_calc_slave_port_state(dev, i, port,
                                                                              MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
                                                                              &gen_event);
                                                /* we can be in a pending state; then do not send the port_down event */
                                                if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
                                                        if (i == mlx4_master_func_num(dev))
                                                                continue;
                                                        eqe->event.port_change.port =
                                                                cpu_to_be32(
                                                                (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
                                                                | (mlx4_phys_to_slave_port(dev, i, port) << 28));
                                                        mlx4_slave_event(dev, i, eqe);
                                                }
                                        }
                                }
                        } else {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;

                                if (!mlx4_is_master(dev))
                                        break;
                                if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
                                        for (i = 0;
                                             i < dev->persist->num_vfs + 1;
                                             i++) {
                                                int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

                                                if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
                                                        continue;
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
                                                s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
                                                if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
                                                        eqe->event.port_change.port =
                                                                cpu_to_be32(
                                                                (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
                                                                | (reported_port << 28));
                                                        mlx4_slave_event(dev, i, eqe);
                                                }
                                        }
                                else /* IB port */
                                        /* port-up event will be sent to a slave when the
                                         * slave's alias-guid is set. This is done in alias_GUID.c
                                         */
                                        set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
                        }
                        break;
                }

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        if (mlx4_is_master(dev)) {
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                                      RES_CQ,
                                                                      be32_to_cpu(eqe->event.cq_err.cqn)
                                                                      & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
                                }

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_cq_event(dev,
                                      be32_to_cpu(eqe->event.cq_err.cqn)
                                      & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_OP_REQUIRED:
                        atomic_inc(&priv->opreq_count);
                        /* FW commands can't be executed from interrupt context;
                         * do the work in a deferred task
                         */
                        queue_work(mlx4_wq, &priv->opreq_task);
                        break;

                case MLX4_EVENT_TYPE_COMM_CHANNEL:
                        if (!mlx4_is_master(dev)) {
                                mlx4_warn(dev, "Received comm channel event for non master device\n");
                                break;
                        }
                        memcpy(&priv->mfunc.master.comm_arm_bit_vector,
                               eqe->event.comm_channel_arm.bit_vec,
                               sizeof eqe->event.comm_channel_arm.bit_vec);
                        queue_work(priv->mfunc.master.comm_wq,
                                   &priv->mfunc.master.comm_work);
                        break;

                case MLX4_EVENT_TYPE_FLR_EVENT:
                        flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
                        if (!mlx4_is_master(dev)) {
                                mlx4_warn(dev, "Non-master function received FLR event\n");
                                break;
                        }

                        mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

                        if (flr_slave >= dev->num_slaves) {
                                mlx4_warn(dev,
                                          "Got FLR for unknown function: %d\n",
                                          flr_slave);
                                update_slave_state = 0;
                        } else
                                update_slave_state = 1;

                        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
                        if (update_slave_state) {
                                priv->mfunc.master.slave_state[flr_slave].active = false;
                                priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
                                priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
                        }
                        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
                        mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
                                            flr_slave);
                        queue_work(priv->mfunc.master.comm_wq,
                                   &priv->mfunc.master.slave_flr_event_work);
                        break;

                case MLX4_EVENT_TYPE_FATAL_WARNING:
                        if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
                                if (mlx4_is_master(dev))
                                        for (i = 0; i < dev->num_slaves; i++) {
                                                mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
                                                         __func__, i);
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
                                mlx4_err(dev, "Temperature threshold was reached! Threshold: %d degrees Celsius; Current temperature: %d\n",
                                         be16_to_cpu(eqe->event.warming.warning_threshold),
                                         be16_to_cpu(eqe->event.warming.current_temperature));
                        } else
                                mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
                                          eqe->type, eqe->subtype, eq->eqn,
                                          eq->cons_index, eqe->owner, eq->nent,
                                          eqe->slave_id,
                                          !!(eqe->owner & 0x80) ^
                                          !!(eq->cons_index & eq->nent) ? "HW" : "SW");

                        break;
                case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
                        mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
                                            (unsigned long) eqe);
                        break;

                case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
                        switch (eqe->subtype) {
                        case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
                                mlx4_warn(dev, "Bad cable detected on port %u\n",
                                          eqe->event.bad_cable.port);
                                break;
                        case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
                                mlx4_warn(dev, "Unsupported cable detected\n");
                                break;
                        default:
                                mlx4_dbg(dev,
                                         "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
                                         eqe->type, eqe->subtype, eq->eqn,
                                         eq->cons_index, eqe->owner, eq->nent,
                                         !!(eqe->owner & 0x80) ^
                                         !!(eq->cons_index & eq->nent) ? "HW" : "SW");
                                break;
                        }
                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
                                  eqe->type, eqe->subtype, eq->eqn,
                                  eq->cons_index, eqe->owner, eq->nent,
                                  eqe->slave_id,
                                  !!(eqe->owner & 0x80) ^
                                  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        /* cqn is 24 bits wide but is initialized such that its higher bits
         * are ones too. Thus, if we got any event, cqn's high bits should be off
         * and we need to schedule the tasklet.
         */
        if (!(cqn & ~0xffffff))
                tasklet_schedule(&eq->tasklet_ctx.task);

        return eqes_found;
}

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq *eq = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
                        struct mlx4_vhcr *vhcr,
                        struct mlx4_cmd_mailbox *inbox,
                        struct mlx4_cmd_mailbox *outbox,
                        struct mlx4_cmd_info *cmd)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq_info *event_eq =
                priv->mfunc.master.slave_state[slave].event_eq;
        u32 in_modifier = vhcr->in_modifier;
        u32 eqn = in_modifier & 0x3FF;
        u64 in_param = vhcr->in_param;
        int err = 0;
        int i;

        if (slave == dev->caps.function)
                err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
                               0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
                               MLX4_CMD_NATIVE);
        if (!err)
                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
                        if (in_param & (1LL << i))
                                event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

        return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                       int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
                        MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
        return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

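/* Worked example for mlx4_num_eq_uar() below (illustrative numbers, not
 * taken from any particular HCA): with reserved_eqs = 16 and
 * num_comp_vectors = 8, (8 + 1 + 16) / 4 - 16 / 4 + 1 = 6 - 4 + 1 = 3
 * UARs need mapping.
 */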
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
        /*
         * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
        return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
                dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(
                                pci_resource_start(dev->persist->pdev, 2) +
                                ((eq->eqn / 4) << (dev->uar_page_shift)),
                                (1 << (dev->uar_page_shift)));
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i]) {
                        iounmap(priv->eq_table.uar_map[i]);
                        priv->eq_table.uar_map[i] = NULL;
                }
}

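/* Create one EQ: allocate the ring as PAGE_SIZE coherent DMA chunks,
 * reserve an EQN from the bitmap, map its doorbell UAR, write the MTT
 * entries, then hand the EQ context to firmware with SW2HW_EQ and set
 * up the per-EQ completion tasklet.
 */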
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
         * strides of 64B, 128B and 256B.
         */
        npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
                                                          pdev->dev,
                                                          PAGE_SIZE, &t,
                                                          GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK |
                                                MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size   = ilog2(eq->nent);
        eq_context->intr          = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        INIT_LIST_HEAD(&eq->tasklet_ctx.list);
        INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
        spin_lock_init(&eq->tasklet_ctx.lock);
        tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
                     (unsigned long)&eq->tasklet_ctx);

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;
        /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
         * strides of 64B, 128B and 256B
         */
        int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

        err = mlx4_HW2SW_EQ(dev, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

        synchronize_irq(eq->irq);
        tasklet_disable(&eq->tasklet_ctx.task);

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                  eq->page_list[i].buf,
                                  eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        int i;

        if (eq_table->have_irq)
                free_irq(dev->persist->pdev->irq, dev);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
                        free_cpumask_var(eq_table->eq[i].affinity_mask);
#if defined(CONFIG_SMP)
                        irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
#endif
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }

        kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
                                                    priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
                                    sizeof *priv->eq_table.eq, GFP_KERNEL);
        if (!priv->eq_table.eq)
                return -ENOMEM;

        return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
        kfree(mlx4_priv(dev)->eq_table.eq);
}

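/* Set up the whole EQ table: one EQ per completion vector plus the
 * MLX4_EQ_ASYNC EQ (sized MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE) for
 * asynchronous events, request the MSI-X or shared INTx interrupt, map
 * the async event mask, and arm the async EQ.
 */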
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
                                         sizeof *priv->eq_table.uar_map,
                                         GFP_KERNEL);
        if (!priv->eq_table.uar_map) {
                err = -ENOMEM;
                goto err_out_free;
        }

        err = mlx4_bitmap_init(&priv->eq_table.bitmap,
                               roundup_pow_of_two(dev->caps.num_eqs),
                               dev->caps.num_eqs - 1,
                               dev->caps.reserved_eqs,
                               roundup_pow_of_two(dev->caps.num_eqs) -
                               dev->caps.num_eqs);
        if (err)
                goto err_out_free;

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;

        if (!mlx4_is_slave(dev)) {
                err = mlx4_map_clr_int(dev);
                if (err)
                        goto err_out_bitmap;

                priv->eq_table.clr_mask =
                        swab32(1 << (priv->eq_table.inta_pin & 31));
                priv->eq_table.clr_int  = priv->clr_base +
                        (priv->eq_table.inta_pin < 32 ? 4 : 0);
        }

        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
                        GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_clr_int;
        }

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                if (i == MLX4_EQ_ASYNC) {
                        err = mlx4_create_eq(dev,
                                             MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                                             0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
                } else {
                        struct mlx4_eq *eq = &priv->eq_table.eq[i];
#ifdef CONFIG_RFS_ACCEL
                        int port = find_first_bit(eq->actv_ports.ports,
                                                  dev->caps.num_ports) + 1;

                        if (port <= dev->caps.num_ports) {
                                struct mlx4_port_info *info =
                                        &mlx4_priv(dev)->port[port];

                                if (!info->rmap) {
                                        info->rmap = alloc_irq_cpu_rmap(
                                                mlx4_get_eqs_per_port(dev, port));
                                        if (!info->rmap) {
                                                mlx4_warn(dev, "Failed to allocate cpu rmap\n");
                                                err = -ENOMEM;
                                                goto err_out_unmap;
                                        }
                                }

                                err = irq_cpu_rmap_add(
                                        info->rmap, eq->irq);
                                if (err)
                                        mlx4_warn(dev, "Failed adding irq rmap\n");
                        }
#endif
                        err = mlx4_create_eq(dev, dev->quotas.cq +
                                             MLX4_NUM_SPARE_EQE,
                                             (dev->flags & MLX4_FLAG_MSI_X) ?
                                             i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
                                             eq);
                }
                if (err)
                        goto err_out_unmap;
        }

        if (dev->flags & MLX4_FLAG_MSI_X) {
                const char *eq_name;

                snprintf(priv->eq_table.irq_names +
                         MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
                         MLX4_IRQNAME_SIZE,
                         "mlx4-async@pci:%s",
                         pci_name(dev->persist->pdev));
                eq_name = priv->eq_table.irq_names +
                        MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;

                err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
                                  mlx4_msi_x_interrupt, 0, eq_name,
                                  priv->eq_table.eq + MLX4_EQ_ASYNC);
                if (err)
                        goto err_out_unmap;

                priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
        } else {
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
                         DRV_NAME "@pci:%s",
                         pci_name(dev->persist->pdev));
                err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_unmap;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

        /* arm ASYNC eq */
        eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);

        return 0;

err_out_unmap:
        while (i > 0)
                mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
        for (i = 1; i <= dev->caps.num_ports; i++) {
                if (mlx4_priv(dev)->port[i].rmap) {
                        free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
                        mlx4_priv(dev)->port[i].rmap = NULL;
                }
        }
#endif
        mlx4_free_irqs(dev);

err_out_clr_int:
        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);

err_out_bitmap:
        mlx4_unmap_uar(dev);
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
        kfree(priv->eq_table.uar_map);

        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
                    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

#ifdef CONFIG_RFS_ACCEL
        for (i = 1; i <= dev->caps.num_ports; i++) {
                if (mlx4_priv(dev)->port[i].rmap) {
                        free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
                        mlx4_priv(dev)->port[i].rmap = NULL;
                }
        }
#endif
        mlx4_free_irqs(dev);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);

        mlx4_unmap_uar(dev);
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

        kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts
 * on the vector allocated for asynchronous events
 */
int mlx4_test_async(struct mlx4_dev *dev)
{
        return mlx4_NOP(dev);
}
EXPORT_SYMBOL(mlx4_test_async);

/* A test that verifies that we can accept interrupts
 * on the given irq vector of the tested port.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        /* Temporarily use polling for command completions */
        mlx4_cmd_use_polling(dev);

        /* Map the new eq to handle all asynchronous events */
        err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
                          priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
        if (err) {
                mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
                goto out;
        }

        /* Go back to using events */
        mlx4_cmd_use_events(dev);
        err = mlx4_NOP(dev);

        /* Return to default */
        mlx4_cmd_use_polling(dev);
out:
        mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
                    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        mlx4_cmd_use_events(dev);

        return err;
}
EXPORT_SYMBOL(mlx4_test_interrupt);

bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        vector = MLX4_CQ_TO_EQ_VECTOR(vector);
        if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
            (vector == MLX4_EQ_ASYNC))
                return false;

        return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
}
EXPORT_SYMBOL(mlx4_is_eq_vector_valid);

u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        unsigned int i;
        unsigned int sum = 0;

        for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
                sum += !!test_bit(port - 1,
                                  priv->eq_table.eq[i].actv_ports.ports);

        return sum;
}
EXPORT_SYMBOL(mlx4_get_eqs_per_port);

int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        vector = MLX4_CQ_TO_EQ_VECTOR(vector);
        if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
                return -EINVAL;

        return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
                                dev->caps.num_ports) > 1);
}
EXPORT_SYMBOL(mlx4_is_eq_shared);

struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
        return mlx4_priv(dev)->port[port].rmap;
}
EXPORT_SYMBOL(mlx4_get_cpu_rmap);

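/* Pick a completion EQ for a CQ on the given port: honor the caller's
 * requested vector when it serves that port (or, failing that, treat the
 * request as a per-port index rebased onto the port's first vector);
 * otherwise fall back to the least-referenced EQ serving the port,
 * requesting its IRQ on first use.
 */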
int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err = 0, i = 0;
        u32 min_ref_count_val = (u32)-1;
        int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
        int *prequested_vector = NULL;

        mutex_lock(&priv->msix_ctl.pool_lock);
        if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
            (requested_vector >= 0) &&
            (requested_vector != MLX4_EQ_ASYNC)) {
                if (test_bit(port - 1,
                             priv->eq_table.eq[requested_vector].actv_ports.ports)) {
                        prequested_vector = &requested_vector;
                } else {
                        struct mlx4_eq *eq;

                        for (i = 1; i < port;
                             requested_vector += mlx4_get_eqs_per_port(dev, i++))
                                ;

                        eq = &priv->eq_table.eq[requested_vector];
                        if (requested_vector < dev->caps.num_comp_vectors + 1 &&
                            test_bit(port - 1, eq->actv_ports.ports)) {
                                prequested_vector = &requested_vector;
                        }
                }
        }

        if (!prequested_vector) {
                requested_vector = -1;
                for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
                     i++) {
                        struct mlx4_eq *eq = &priv->eq_table.eq[i];

                        if (min_ref_count_val > eq->ref_count &&
                            test_bit(port - 1, eq->actv_ports.ports)) {
                                min_ref_count_val = eq->ref_count;
                                requested_vector = i;
                        }
                }

                if (requested_vector < 0) {
                        err = -ENOSPC;
                        goto err_unlock;
                }

                prequested_vector = &requested_vector;
        }

        if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
            dev->flags & MLX4_FLAG_MSI_X) {
                set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
                snprintf(priv->eq_table.irq_names +
                         *prequested_vector * MLX4_IRQNAME_SIZE,
                         MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
                         *prequested_vector, dev_name(&dev->persist->pdev->dev));

                err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
                                  mlx4_msi_x_interrupt, 0,
                                  &priv->eq_table.irq_names[*prequested_vector << 5],
                                  priv->eq_table.eq + *prequested_vector);

                if (err) {
                        clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
                        *prequested_vector = -1;
                } else {
#if defined(CONFIG_SMP)
                        mlx4_set_eq_affinity_hint(priv, *prequested_vector);
#endif
                        eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
                        priv->eq_table.eq[*prequested_vector].have_irq = 1;
                }
        }

        if (!err && *prequested_vector >= 0)
                priv->eq_table.eq[*prequested_vector].ref_count++;

err_unlock:
        mutex_unlock(&priv->msix_ctl.pool_lock);

        if (!err && *prequested_vector >= 0)
                *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
        else
                *vector = 0;

        return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);

        mutex_lock(&priv->msix_ctl.pool_lock);
        priv->eq_table.eq[eq_vec].ref_count--;

        /* once we have allocated an EQ, we don't release it because it
         * might be bound to a cpu_rmap.
         */
        mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);