Commit | Line | Data |
---|---|---|
225c7b1f | 1 | /* |
51a379d0 | 2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
225c7b1f RD |
3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
225c7b1f | 34 | #include <linux/interrupt.h> |
5a0e3ad6 | 35 | #include <linux/slab.h> |
ee40fa06 | 36 | #include <linux/export.h> |
27ac792c | 37 | #include <linux/mm.h> |
9cbe05c7 | 38 | #include <linux/dma-mapping.h> |
225c7b1f RD |
39 | |
40 | #include <linux/mlx4/cmd.h> | |
d9236c3f | 41 | #include <linux/cpu_rmap.h> |
225c7b1f RD |
42 | |
43 | #include "mlx4.h" | |
44 | #include "fw.h" | |
45 | ||
f5f5951c | 46 | enum { |
0b7ca5a9 | 47 | MLX4_IRQNAME_SIZE = 32 |
f5f5951c AB |
48 | }; |
49 | ||
225c7b1f RD |
50 | enum { |
51 | MLX4_NUM_ASYNC_EQE = 0x100, | |
52 | MLX4_NUM_SPARE_EQE = 0x80, | |
53 | MLX4_EQ_ENTRY_SIZE = 0x20 | |
54 | }; | |
55 | ||
225c7b1f RD |
56 | #define MLX4_EQ_STATUS_OK ( 0 << 28) |
57 | #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) | |
58 | #define MLX4_EQ_OWNER_SW ( 0 << 24) | |
59 | #define MLX4_EQ_OWNER_HW ( 1 << 24) | |
60 | #define MLX4_EQ_FLAG_EC ( 1 << 18) | |
61 | #define MLX4_EQ_FLAG_OI ( 1 << 17) | |
62 | #define MLX4_EQ_STATE_ARMED ( 9 << 8) | |
63 | #define MLX4_EQ_STATE_FIRED (10 << 8) | |
64 | #define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) | |
65 | ||
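The values above pack into disjoint bit fields of a single 32-bit EQ context word (status in bits 28-31, owner in bit 24, the EC/OI flags in bits 18 and 17, state in bits 8-11), so they compose with a plain OR; mlx4_create_eq() below does exactly that with `MLX4_EQ_STATUS_OK | MLX4_EQ_STATE_ARMED`. A minimal userspace sketch of the composition (illustrative only, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

#define MLX4_EQ_STATUS_OK	( 0 << 28)
#define MLX4_EQ_OWNER_HW	( 1 << 24)
#define MLX4_EQ_STATE_ARMED	( 9 << 8)

int main(void)
{
	/* Disjoint bit ranges, so OR-ing the fields cannot clobber each other. */
	uint32_t flags = MLX4_EQ_STATUS_OK | MLX4_EQ_OWNER_HW | MLX4_EQ_STATE_ARMED;

	printf("flags = 0x%08x\n", (unsigned)flags);	/* prints flags = 0x01000900 */
	return 0;
}
```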
66 | #define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ | |
67 | (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ | |
68 | (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ | |
69 | (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ | |
70 | (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ | |
71 | (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ | |
72 | (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ | |
73 | (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ | |
74 | (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ | |
225c7b1f RD |
75 | (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ |
76 | (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ | |
77 | (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ | |
78 | (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ | |
79 | (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ | |
acba2420 | 80 | (1ull << MLX4_EVENT_TYPE_CMD) | \ |
fe6f700d | 81 | (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ |
acba2420 | 82 | (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ |
5984be90 JM |
83 | (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ |
84 | (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) | |
225c7b1f | 85 | |
00f5ce99 JM |
86 | static u64 get_async_ev_mask(struct mlx4_dev *dev) |
87 | { | |
88 | u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; | |
89 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) | |
90 | async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); | |
be6a6b43 JM |
91 | if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) |
92 | async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT); | |
00f5ce99 JM |
93 | |
94 | return async_ev_mask; | |
95 | } | |
96 | ||
225c7b1f RD |
97 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) |
98 | { | |
99 | __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | | |
100 | req_not << 31), | |
101 | eq->doorbell); | |
102 | /* We still want ordering, just not swabbing, so add a barrier */ | |
103 | mb(); | |
104 | } | |
105 | ||
43c816c6 IS |
106 | static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, |
107 | u8 eqe_size) | |
225c7b1f | 108 | { |
08ff3235 | 109 | /* (entry & (eq->nent - 1)) gives us a cyclic array */ |
43c816c6 IS |
110 | unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; |
111 | /* CX3 is capable of extending the EQE from 32 to 64 bytes with | |
112 | * strides of 64B, 128B and 256B. |
113 | * When 64B EQE is used, the first (in the lower addresses) | |
08ff3235 OG |
114 | * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes |
115 | * contain the legacy EQE information. | |
43c816c6 | 116 | * In all other cases, the first 32B contains the legacy EQE info. |
08ff3235 OG |
117 | */ |
118 | return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; | |
225c7b1f RD |
119 | } |
120 | ||
43c816c6 | 121 | static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) |
225c7b1f | 122 | { |
43c816c6 | 123 | struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); |
225c7b1f RD |
124 | return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; |
125 | } | |
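get_eqe() and next_eqe_sw() above implement the classic cyclic-buffer ownership scheme: `entry & (eq->nent - 1)` wraps the index because nent is a power of two, and an entry belongs to software only when its owner bit matches the pass parity `cons_index & nent`, which flips on every trip around the ring. A standalone sketch of the polarity test (toy values, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

/* Software owns the entry when the owner bit's polarity matches the
 * pass parity hidden in the consumer index (mirrors next_eqe_sw()). */
static int sw_owned(uint8_t owner, uint32_t cons_index, uint32_t nent)
{
	return !(!!(owner & 0x80) ^ !!(cons_index & nent));
}

int main(void)
{
	uint32_t nent = 4;	/* toy queue size; must be a power of two */

	printf("%d\n", sw_owned(0x00, 1, nent));  /* 1: first pass, fresh EQE   */
	printf("%d\n", sw_owned(0x80, 5, nent));  /* 1: second pass, fresh EQE  */
	printf("%d\n", sw_owned(0x00, 5, nent));  /* 0: stale entry from pass 1 */
	return 0;
}
```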
126 | ||
acba2420 JM |
127 | static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) |
128 | { | |
129 | struct mlx4_eqe *eqe = | |
130 | &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; | |
131 | return (!!(eqe->owner & 0x80) ^ | |
132 | !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? | |
133 | eqe : NULL; | |
134 | } | |
135 | ||
acba2420 JM |
136 | void mlx4_gen_slave_eqe(struct work_struct *work) |
137 | { | |
138 | struct mlx4_mfunc_master_ctx *master = | |
139 | container_of(work, struct mlx4_mfunc_master_ctx, | |
140 | slave_event_work); | |
141 | struct mlx4_mfunc *mfunc = | |
142 | container_of(master, struct mlx4_mfunc, master); | |
143 | struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); | |
144 | struct mlx4_dev *dev = &priv->dev; | |
145 | struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; | |
146 | struct mlx4_eqe *eqe; | |
147 | u8 slave; | |
74d4943f | 148 | int i, phys_port, slave_port; |
acba2420 JM |
149 | |
150 | for (eqe = next_slave_event_eqe(slave_eq); eqe; | |
151 | eqe = next_slave_event_eqe(slave_eq)) { | |
152 | slave = eqe->slave_id; | |
153 | ||
8d80d04a MS |
154 | if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE && |
155 | eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN && | |
156 | mlx4_is_bonded(dev)) { | |
157 | struct mlx4_port_cap port_cap; | |
158 | ||
159 | if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state) | |
160 | goto consume; | |
161 | ||
162 | if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state) | |
163 | goto consume; | |
164 | } | |
acba2420 JM |
165 | /* All active slaves need to receive the event */ |
166 | if (slave == ALL_SLAVES) { | |
bffb023a | 167 | for (i = 0; i <= dev->persist->num_vfs; i++) { |
74d4943f OG |
168 | phys_port = 0; |
169 | if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT && | |
170 | eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) { | |
171 | phys_port = eqe->event.port_mgmt_change.port; | |
172 | slave_port = mlx4_phys_to_slave_port(dev, i, phys_port); | |
173 | if (slave_port < 0) /* VF doesn't have this port */ | |
174 | continue; | |
175 | eqe->event.port_mgmt_change.port = slave_port; | |
176 | } | |
bffb023a JM |
177 | if (mlx4_GEN_EQE(dev, i, eqe)) |
178 | mlx4_warn(dev, "Failed to generate event for slave %d\n", | |
179 | i); | |
74d4943f OG |
180 | if (phys_port) |
181 | eqe->event.port_mgmt_change.port = phys_port; | |
acba2420 JM |
182 | } |
183 | } else { | |
184 | if (mlx4_GEN_EQE(dev, slave, eqe)) | |
1a91de28 JP |
185 | mlx4_warn(dev, "Failed to generate event for slave %d\n", |
186 | slave); | |
acba2420 | 187 | } |
8d80d04a | 188 | consume: |
acba2420 JM |
189 | ++slave_eq->cons; |
190 | } | |
191 | } | |
192 | ||
193 | ||
194 | static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) | |
195 | { | |
196 | struct mlx4_priv *priv = mlx4_priv(dev); | |
197 | struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; | |
992e8e6e JM |
198 | struct mlx4_eqe *s_eqe; |
199 | unsigned long flags; | |
acba2420 | 200 | |
992e8e6e JM |
201 | spin_lock_irqsave(&slave_eq->event_lock, flags); |
202 | s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; | |
acba2420 JM |
203 | if ((!!(s_eqe->owner & 0x80)) ^ |
204 | (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { | |
1a91de28 JP |
205 | mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n", |
206 | slave); | |
992e8e6e | 207 | spin_unlock_irqrestore(&slave_eq->event_lock, flags); |
acba2420 JM |
208 | return; |
209 | } | |
210 | ||
c02b0501 | 211 | memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1); |
acba2420 JM |
212 | s_eqe->slave_id = slave; |
213 | /* ensure all information is written before setting the ownership bit */ |
12b3375f | 214 | dma_wmb(); |
acba2420 JM |
215 | s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80; |
216 | ++slave_eq->prod; | |
217 | ||
218 | queue_work(priv->mfunc.master.comm_wq, | |
219 | &priv->mfunc.master.slave_event_work); | |
992e8e6e | 220 | spin_unlock_irqrestore(&slave_eq->event_lock, flags); |
acba2420 JM |
221 | } |
222 | ||
223 | static void mlx4_slave_event(struct mlx4_dev *dev, int slave, | |
224 | struct mlx4_eqe *eqe) | |
225 | { | |
226 | struct mlx4_priv *priv = mlx4_priv(dev); | |
acba2420 | 227 | |
bffb023a JM |
228 | if (slave < 0 || slave > dev->persist->num_vfs || |
229 | slave == dev->caps.function || | |
230 | !priv->mfunc.master.slave_state[slave].active) | |
acba2420 | 231 | return; |
acba2420 JM |
232 | |
233 | slave_event(dev, slave, eqe); | |
234 | } | |
235 | ||
db9777e3 | 236 | #if defined(CONFIG_SMP) |
de161803 IS |
237 | static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) |
238 | { | |
239 | int hint_err; | |
240 | struct mlx4_dev *dev = &priv->dev; | |
241 | struct mlx4_eq *eq = &priv->eq_table.eq[vec]; | |
242 | ||
243 | if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) | |
244 | return; | |
245 | ||
246 | hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); | |
247 | if (hint_err) | |
248 | mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err); | |
249 | } | |
db9777e3 | 250 | #endif |
de161803 | 251 | |
993c401e JM |
252 | int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) |
253 | { | |
254 | struct mlx4_eqe eqe; | |
255 | ||
256 | struct mlx4_priv *priv = mlx4_priv(dev); | |
257 | struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave]; | |
258 | ||
259 | if (!s_slave->active) | |
260 | return 0; | |
261 | ||
262 | memset(&eqe, 0, sizeof eqe); | |
263 | ||
264 | eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; | |
265 | eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; | |
74d4943f | 266 | eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); |
993c401e JM |
267 | |
268 | return mlx4_GEN_EQE(dev, slave, &eqe); | |
269 | } | |
270 | EXPORT_SYMBOL(mlx4_gen_pkey_eqe); | |
271 | ||
272 | int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) | |
273 | { | |
274 | struct mlx4_eqe eqe; | |
275 | ||
276 | /* don't send if we don't have that slave */ |
872bf2fb | 277 | if (dev->persist->num_vfs < slave) |
993c401e JM |
278 | return 0; |
279 | memset(&eqe, 0, sizeof eqe); | |
280 | ||
281 | eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; | |
282 | eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; | |
74d4943f | 283 | eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); |
993c401e JM |
284 | |
285 | return mlx4_GEN_EQE(dev, slave, &eqe); | |
286 | } | |
287 | EXPORT_SYMBOL(mlx4_gen_guid_change_eqe); | |
288 | ||
289 | int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, | |
290 | u8 port_subtype_change) | |
291 | { | |
292 | struct mlx4_eqe eqe; | |
74d4943f | 293 | u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port); |
993c401e JM |
294 | |
295 | /* don't send if we don't have that slave */ |
872bf2fb | 296 | if (dev->persist->num_vfs < slave) |
993c401e JM |
297 | return 0; |
298 | memset(&eqe, 0, sizeof eqe); | |
299 | ||
300 | eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; | |
301 | eqe.subtype = port_subtype_change; | |
74d4943f | 302 | eqe.event.port_change.port = cpu_to_be32(slave_port << 28); |
993c401e JM |
303 | |
304 | mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__, | |
305 | port_subtype_change, slave, port); | |
306 | return mlx4_GEN_EQE(dev, slave, &eqe); | |
307 | } | |
308 | EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe); | |
309 | ||
310 | enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port) | |
311 | { | |
312 | struct mlx4_priv *priv = mlx4_priv(dev); | |
313 | struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; | |
449fc488 MB |
314 | struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); |
315 | ||
316 | if (slave >= dev->num_slaves || port > dev->caps.num_ports || | |
317 | port <= 0 || !test_bit(port - 1, actv_ports.ports)) { | |
993c401e JM |
318 | pr_err("%s: Error: asking for slave:%d, port:%d\n", |
319 | __func__, slave, port); | |
320 | return SLAVE_PORT_DOWN; | |
321 | } | |
322 | return s_state[slave].port_state[port]; | |
323 | } | |
324 | EXPORT_SYMBOL(mlx4_get_slave_port_state); | |
325 | ||
326 | static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, | |
327 | enum slave_port_state state) | |
328 | { | |
329 | struct mlx4_priv *priv = mlx4_priv(dev); | |
330 | struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; | |
449fc488 | 331 | struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); |
993c401e | 332 | |
449fc488 MB |
333 | if (slave >= dev->num_slaves || port > dev->caps.num_ports || |
334 | port <= 0 || !test_bit(port - 1, actv_ports.ports)) { | |
993c401e JM |
335 | pr_err("%s: Error: asking for slave:%d, port:%d\n", |
336 | __func__, slave, port); | |
337 | return -1; | |
338 | } | |
339 | s_state[slave].port_state[port] = state; | |
340 | ||
341 | return 0; | |
342 | } | |
343 | ||
344 | static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) | |
345 | { | |
346 | int i; | |
347 | enum slave_port_gen_event gen_event; | |
449fc488 MB |
348 | struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, |
349 | port); | |
993c401e | 350 | |
872bf2fb | 351 | for (i = 0; i < dev->persist->num_vfs + 1; i++) |
449fc488 MB |
352 | if (test_bit(i, slaves_pport.slaves)) |
353 | set_and_calc_slave_port_state(dev, i, port, | |
354 | event, &gen_event); | |
993c401e JM |
355 | } |
356 | /************************************************************************** | |
357 | The function gets as input the new event for that port, |
358 | and changes the slave's port state according to the previous state. |
359 | The events are: | |
360 | MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, | |
361 | MLX4_PORT_STATE_DEV_EVENT_PORT_UP | |
362 | MLX4_PORT_STATE_IB_EVENT_GID_VALID | |
363 | MLX4_PORT_STATE_IB_EVENT_GID_INVALID | |
364 | ***************************************************************************/ | |
365 | int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, | |
366 | u8 port, int event, | |
367 | enum slave_port_gen_event *gen_event) | |
368 | { | |
369 | struct mlx4_priv *priv = mlx4_priv(dev); | |
370 | struct mlx4_slave_state *ctx = NULL; | |
371 | unsigned long flags; | |
372 | int ret = -1; | |
449fc488 | 373 | struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); |
993c401e JM |
374 | enum slave_port_state cur_state = |
375 | mlx4_get_slave_port_state(dev, slave, port); | |
376 | ||
377 | *gen_event = SLAVE_PORT_GEN_EVENT_NONE; | |
378 | ||
449fc488 MB |
379 | if (slave >= dev->num_slaves || port > dev->caps.num_ports || |
380 | port <= 0 || !test_bit(port - 1, actv_ports.ports)) { | |
993c401e JM |
381 | pr_err("%s: Error: asking for slave:%d, port:%d\n", |
382 | __func__, slave, port); | |
383 | return ret; | |
384 | } | |
385 | ||
386 | ctx = &priv->mfunc.master.slave_state[slave]; | |
387 | spin_lock_irqsave(&ctx->lock, flags); | |
388 | ||
993c401e JM |
389 | switch (cur_state) { |
390 | case SLAVE_PORT_DOWN: | |
391 | if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) | |
392 | mlx4_set_slave_port_state(dev, slave, port, | |
393 | SLAVE_PENDING_UP); | |
394 | break; | |
395 | case SLAVE_PENDING_UP: | |
396 | if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) | |
397 | mlx4_set_slave_port_state(dev, slave, port, | |
398 | SLAVE_PORT_DOWN); | |
399 | else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) { | |
400 | mlx4_set_slave_port_state(dev, slave, port, | |
401 | SLAVE_PORT_UP); | |
402 | *gen_event = SLAVE_PORT_GEN_EVENT_UP; | |
403 | } | |
404 | break; | |
405 | case SLAVE_PORT_UP: | |
406 | if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) { | |
407 | mlx4_set_slave_port_state(dev, slave, port, | |
408 | SLAVE_PORT_DOWN); | |
409 | *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; | |
410 | } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID == | |
411 | event) { | |
412 | mlx4_set_slave_port_state(dev, slave, port, | |
413 | SLAVE_PENDING_UP); | |
414 | *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; | |
415 | } | |
416 | break; | |
417 | default: | |
1a91de28 JP |
418 | pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n", |
419 | __func__, slave, port); | |
420 | goto out; | |
993c401e JM |
421 | } |
422 | ret = mlx4_get_slave_port_state(dev, slave, port); | |
993c401e JM |
423 | |
424 | out: | |
425 | spin_unlock_irqrestore(&ctx->lock, flags); | |
426 | return ret; | |
427 | } | |
428 | ||
429 | EXPORT_SYMBOL(set_and_calc_slave_port_state); | |
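The switch above encodes a three-state machine per slave port: DOWN moves to PENDING_UP on a PORT_UP event, PENDING_UP moves to UP (generating an up event) only once the GID becomes valid, and UP falls back on PORT_DOWN or GID_INVALID (generating a down event). A compact userspace model of those transitions (simplified names, illustrative only):

```c
#include <stdio.h>

enum state { DOWN, PENDING_UP, UP };
enum event { EV_PORT_DOWN, EV_PORT_UP, EV_GID_VALID, EV_GID_INVALID };
enum gen   { GEN_NONE, GEN_UP, GEN_DOWN };

/* Mirrors the transitions of set_and_calc_slave_port_state(). */
static enum gen step(enum state *s, enum event ev)
{
	switch (*s) {
	case DOWN:
		if (ev == EV_PORT_UP)
			*s = PENDING_UP;
		break;
	case PENDING_UP:
		if (ev == EV_PORT_DOWN) {
			*s = DOWN;
		} else if (ev == EV_GID_VALID) {
			*s = UP;
			return GEN_UP;
		}
		break;
	case UP:
		if (ev == EV_PORT_DOWN) {
			*s = DOWN;
			return GEN_DOWN;
		} else if (ev == EV_GID_INVALID) {
			*s = PENDING_UP;
			return GEN_DOWN;
		}
		break;
	}
	return GEN_NONE;
}

int main(void)
{
	enum state s = DOWN;

	step(&s, EV_PORT_UP);                     /* DOWN -> PENDING_UP, no event */
	printf("%d\n", step(&s, EV_GID_VALID));   /* -> UP, prints 1 (GEN_UP)     */
	printf("%d\n", step(&s, EV_PORT_DOWN));   /* -> DOWN, prints 2 (GEN_DOWN) */
	return 0;
}
```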
430 | ||
431 | int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) | |
432 | { | |
433 | struct mlx4_eqe eqe; | |
434 | ||
435 | memset(&eqe, 0, sizeof eqe); | |
436 | ||
437 | eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; | |
438 | eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; | |
439 | eqe.event.port_mgmt_change.port = port; | |
440 | eqe.event.port_mgmt_change.params.port_info.changed_attr = | |
441 | cpu_to_be32((u32) attr); | |
442 | ||
443 | slave_event(dev, ALL_SLAVES, &eqe); | |
444 | return 0; | |
445 | } | |
446 | EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev); | |
447 | ||
acba2420 JM |
448 | void mlx4_master_handle_slave_flr(struct work_struct *work) |
449 | { | |
450 | struct mlx4_mfunc_master_ctx *master = | |
451 | container_of(work, struct mlx4_mfunc_master_ctx, | |
452 | slave_flr_event_work); | |
453 | struct mlx4_mfunc *mfunc = | |
454 | container_of(master, struct mlx4_mfunc, master); | |
455 | struct mlx4_priv *priv = | |
456 | container_of(mfunc, struct mlx4_priv, mfunc); | |
457 | struct mlx4_dev *dev = &priv->dev; | |
458 | struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; | |
459 | int i; | |
460 | int err; | |
311f813a | 461 | unsigned long flags; |
acba2420 JM |
462 | |
463 | mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); | |
464 | ||
465 | for (i = 0 ; i < dev->num_slaves; i++) { | |
466 | ||
467 | if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { | |
1a91de28 JP |
468 | mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", |
469 | i); | |
55ad3592 YH |
470 | /* In case of 'Reset flow' FLR can be generated for |
471 | * a slave before mlx4_load_one is done. | |
472 | * Make sure the interface is up before trying to delete |
473 | * slave resources which weren't allocated yet. | |
474 | */ | |
475 | if (dev->persist->interface_state & | |
476 | MLX4_INTERFACE_STATE_UP) | |
477 | mlx4_delete_all_resources_for_slave(dev, i); | |
acba2420 | 478 | /* return the slave to running mode */ |
311f813a | 479 | spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); |
acba2420 JM |
480 | slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; |
481 | slave_state[i].is_slave_going_down = 0; | |
311f813a | 482 | spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); |
acba2420 JM |
483 | /* notify the FW: */ |
484 | err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, | |
485 | MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); | |
486 | if (err) | |
1a91de28 JP |
487 | mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n", |
488 | i); | |
acba2420 JM |
489 | } |
490 | } | |
491 | } | |
492 | ||
225c7b1f RD |
493 | static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) |
494 | { | |
acba2420 | 495 | struct mlx4_priv *priv = mlx4_priv(dev); |
225c7b1f | 496 | struct mlx4_eqe *eqe; |
3dca0f42 | 497 | int cqn = -1; |
225c7b1f RD |
498 | int eqes_found = 0; |
499 | int set_ci = 0; | |
27bf91d6 | 500 | int port; |
acba2420 JM |
501 | int slave = 0; |
502 | int ret; | |
503 | u32 flr_slave; | |
504 | u8 update_slave_state; | |
505 | int i; | |
993c401e | 506 | enum slave_port_gen_event gen_event; |
311f813a | 507 | unsigned long flags; |
948e306d | 508 | struct mlx4_vport_state *s_info; |
43c816c6 | 509 | int eqe_size = dev->caps.eqe_size; |
225c7b1f | 510 | |
43c816c6 | 511 | while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { |
225c7b1f RD |
512 | /* |
513 | * Make sure we read EQ entry contents after we've | |
514 | * checked the ownership bit. | |
515 | */ | |
12b3375f | 516 | dma_rmb(); |
225c7b1f RD |
517 | |
518 | switch (eqe->type) { | |
519 | case MLX4_EVENT_TYPE_COMP: | |
520 | cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; | |
521 | mlx4_cq_completion(dev, cqn); | |
522 | break; | |
523 | ||
524 | case MLX4_EVENT_TYPE_PATH_MIG: | |
525 | case MLX4_EVENT_TYPE_COMM_EST: | |
526 | case MLX4_EVENT_TYPE_SQ_DRAINED: | |
527 | case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: | |
528 | case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: | |
529 | case MLX4_EVENT_TYPE_PATH_MIG_FAILED: | |
530 | case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: | |
531 | case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: | |
acba2420 JM |
532 | mlx4_dbg(dev, "event %d arrived\n", eqe->type); |
533 | if (mlx4_is_master(dev)) { | |
534 | /* forward only to slave owning the QP */ | |
535 | ret = mlx4_get_slave_from_resource_id(dev, | |
536 | RES_QP, | |
537 | be32_to_cpu(eqe->event.qp.qpn) | |
538 | & 0xffffff, &slave); | |
539 | if (ret && ret != -ENOENT) { | |
1a91de28 | 540 | mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", |
acba2420 JM |
541 | eqe->type, eqe->subtype, |
542 | eq->eqn, eq->cons_index, ret); | |
543 | break; | |
544 | } | |
545 | ||
546 | if (!ret && slave != dev->caps.function) { | |
547 | mlx4_slave_event(dev, slave, eqe); | |
548 | break; | |
549 | } | |
550 | ||
551 | } | |
552 | mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & | |
553 | 0xffffff, eqe->type); | |
225c7b1f RD |
554 | break; |
555 | ||
556 | case MLX4_EVENT_TYPE_SRQ_LIMIT: | |
e0debf9c JM |
557 | mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", |
558 | __func__); | |
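			/* fall through */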
225c7b1f | 559 | case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: |
acba2420 JM |
560 | if (mlx4_is_master(dev)) { |
561 | /* forward only to slave owning the SRQ */ | |
562 | ret = mlx4_get_slave_from_resource_id(dev, | |
563 | RES_SRQ, | |
564 | be32_to_cpu(eqe->event.srq.srqn) | |
565 | & 0xffffff, | |
566 | &slave); | |
567 | if (ret && ret != -ENOENT) { | |
1a91de28 | 568 | mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", |
acba2420 JM |
569 | eqe->type, eqe->subtype, |
570 | eq->eqn, eq->cons_index, ret); | |
571 | break; | |
572 | } | |
1a91de28 JP |
573 | mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", |
574 | __func__, slave, | |
acba2420 JM |
575 | be32_to_cpu(eqe->event.srq.srqn), |
576 | eqe->type, eqe->subtype); | |
577 | ||
578 | if (!ret && slave != dev->caps.function) { | |
1a91de28 JP |
579 | mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", |
580 | __func__, eqe->type, | |
acba2420 JM |
581 | eqe->subtype, slave); |
582 | mlx4_slave_event(dev, slave, eqe); | |
583 | break; | |
584 | } | |
585 | } | |
586 | mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & | |
587 | 0xffffff, eqe->type); | |
225c7b1f RD |
588 | break; |
589 | ||
590 | case MLX4_EVENT_TYPE_CMD: | |
591 | mlx4_cmd_event(dev, | |
592 | be16_to_cpu(eqe->event.cmd.token), | |
593 | eqe->event.cmd.status, | |
594 | be64_to_cpu(eqe->event.cmd.out_param)); | |
595 | break; | |
596 | ||
449fc488 MB |
597 | case MLX4_EVENT_TYPE_PORT_CHANGE: { |
598 | struct mlx4_slaves_pport slaves_port; | |
27bf91d6 | 599 | port = be32_to_cpu(eqe->event.port_change.port) >> 28; |
449fc488 | 600 | slaves_port = mlx4_phys_to_slaves_pport(dev, port); |
27bf91d6 | 601 | if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { |
993c401e | 602 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, |
27bf91d6 YP |
603 | port); |
604 | mlx4_priv(dev)->sense.do_sense_port[port] = 1; | |
993c401e JM |
605 | if (!mlx4_is_master(dev)) |
606 | break; | |
872bf2fb YH |
607 | for (i = 0; i < dev->persist->num_vfs + 1; |
608 | i++) { | |
8d80d04a MS |
609 | int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); |
610 | ||
611 | if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) | |
449fc488 | 612 | continue; |
993c401e JM |
613 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { |
614 | if (i == mlx4_master_func_num(dev)) | |
615 | continue; | |
1a91de28 | 616 | mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", |
acba2420 | 617 | __func__, i, port); |
1c1bf349 | 618 | s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; |
449fc488 MB |
619 | if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { |
620 | eqe->event.port_change.port = | |
621 | cpu_to_be32( | |
622 | (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) | |
8d80d04a | 623 | | (reported_port << 28)); |
948e306d | 624 | mlx4_slave_event(dev, i, eqe); |
449fc488 | 625 | } |
993c401e JM |
626 | } else { /* IB port */ |
627 | set_and_calc_slave_port_state(dev, i, port, | |
628 | MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, | |
629 | &gen_event); | |
630 | /* we may be in a pending state, in which case do not send a port_down event */ |
631 | if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) { | |
632 | if (i == mlx4_master_func_num(dev)) | |
633 | continue; | |
74d4943f OG |
634 | eqe->event.port_change.port = |
635 | cpu_to_be32( | |
636 | (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) | |
637 | | (mlx4_phys_to_slave_port(dev, i, port) << 28)); | |
993c401e JM |
638 | mlx4_slave_event(dev, i, eqe); |
639 | } | |
acba2420 | 640 | } |
993c401e | 641 | } |
27bf91d6 | 642 | } else { |
993c401e JM |
643 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); |
644 | ||
27bf91d6 | 645 | mlx4_priv(dev)->sense.do_sense_port[port] = 0; |
acba2420 | 646 | |
993c401e JM |
647 | if (!mlx4_is_master(dev)) |
648 | break; | |
649 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) | |
872bf2fb YH |
650 | for (i = 0; |
651 | i < dev->persist->num_vfs + 1; | |
652 | i++) { | |
8d80d04a MS |
653 | int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); |
654 | ||
655 | if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) | |
449fc488 | 656 | continue; |
993c401e | 657 | if (i == mlx4_master_func_num(dev)) |
acba2420 | 658 | continue; |
1c1bf349 | 659 | s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; |
449fc488 MB |
660 | if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { |
661 | eqe->event.port_change.port = | |
662 | cpu_to_be32( | |
663 | (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) | |
8d80d04a | 664 | | (reported_port << 28)); |
948e306d | 665 | mlx4_slave_event(dev, i, eqe); |
449fc488 | 666 | } |
acba2420 | 667 | } |
993c401e JM |
668 | else /* IB port */ |
669 | /* port-up event will be sent to a slave when the | |
670 | * slave's alias-guid is set. This is done in alias_GUID.c | |
671 | */ | |
672 | set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); | |
27bf91d6 | 673 | } |
225c7b1f | 674 | break; |
449fc488 | 675 | } |
225c7b1f RD |
676 | |
677 | case MLX4_EVENT_TYPE_CQ_ERROR: | |
678 | mlx4_warn(dev, "CQ %s on CQN %06x\n", | |
679 | eqe->event.cq_err.syndrome == 1 ? | |
680 | "overrun" : "access violation", | |
681 | be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); | |
acba2420 JM |
682 | if (mlx4_is_master(dev)) { |
683 | ret = mlx4_get_slave_from_resource_id(dev, | |
684 | RES_CQ, | |
685 | be32_to_cpu(eqe->event.cq_err.cqn) | |
686 | & 0xffffff, &slave); | |
687 | if (ret && ret != -ENOENT) { | |
1a91de28 JP |
688 | mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", |
689 | eqe->type, eqe->subtype, | |
690 | eq->eqn, eq->cons_index, ret); | |
acba2420 JM |
691 | break; |
692 | } | |
693 | ||
694 | if (!ret && slave != dev->caps.function) { | |
695 | mlx4_slave_event(dev, slave, eqe); | |
696 | break; | |
697 | } | |
698 | } | |
699 | mlx4_cq_event(dev, | |
700 | be32_to_cpu(eqe->event.cq_err.cqn) | |
701 | & 0xffffff, | |
225c7b1f RD |
702 | eqe->type); |
703 | break; | |
704 | ||
705 | case MLX4_EVENT_TYPE_EQ_OVERFLOW: | |
706 | mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); | |
707 | break; | |
708 | ||
fe6f700d YP |
709 | case MLX4_EVENT_TYPE_OP_REQUIRED: |
710 | atomic_inc(&priv->opreq_count); | |
711 | /* FW commands can't be executed from interrupt context; |
712 | * do the work in a deferred task instead |
713 | */ | |
714 | queue_work(mlx4_wq, &priv->opreq_task); | |
715 | break; | |
716 | ||
acba2420 JM |
717 | case MLX4_EVENT_TYPE_COMM_CHANNEL: |
718 | if (!mlx4_is_master(dev)) { | |
1a91de28 | 719 | mlx4_warn(dev, "Received comm channel event for non master device\n"); |
acba2420 JM |
720 | break; |
721 | } | |
722 | memcpy(&priv->mfunc.master.comm_arm_bit_vector, | |
723 | eqe->event.comm_channel_arm.bit_vec, | |
724 | sizeof eqe->event.comm_channel_arm.bit_vec); | |
725 | queue_work(priv->mfunc.master.comm_wq, | |
726 | &priv->mfunc.master.comm_work); | |
727 | break; | |
728 | ||
729 | case MLX4_EVENT_TYPE_FLR_EVENT: | |
730 | flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); | |
731 | if (!mlx4_is_master(dev)) { | |
1a91de28 | 732 | mlx4_warn(dev, "Non-master function received FLR event\n"); |
acba2420 JM |
733 | break; |
734 | } | |
735 | ||
736 | mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); | |
737 | ||
30f7c73b | 738 | if (flr_slave >= dev->num_slaves) { |
acba2420 JM |
739 | mlx4_warn(dev, |
740 | "Got FLR for unknown function: %d\n", | |
741 | flr_slave); | |
742 | update_slave_state = 0; | |
743 | } else | |
744 | update_slave_state = 1; | |
745 | ||
311f813a | 746 | spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); |
acba2420 JM |
747 | if (update_slave_state) { |
748 | priv->mfunc.master.slave_state[flr_slave].active = false; | |
749 | priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; | |
750 | priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; | |
751 | } | |
311f813a | 752 | spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); |
a0667a83 YH |
753 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, |
754 | flr_slave); | |
acba2420 JM |
755 | queue_work(priv->mfunc.master.comm_wq, |
756 | &priv->mfunc.master.slave_flr_event_work); | |
757 | break; | |
5984be90 JM |
758 | |
759 | case MLX4_EVENT_TYPE_FATAL_WARNING: | |
760 | if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { | |
761 | if (mlx4_is_master(dev)) | |
762 | for (i = 0; i < dev->num_slaves; i++) { | |
1a91de28 JP |
763 | mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n", |
764 | __func__, i); | |
5984be90 JM |
765 | if (i == dev->caps.function) |
766 | continue; | |
767 | mlx4_slave_event(dev, i, eqe); | |
768 | } | |
1a91de28 JP |
769 | mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n", |
770 | be16_to_cpu(eqe->event.warming.warning_threshold), | |
771 | be16_to_cpu(eqe->event.warming.current_temperature)); | |
5984be90 | 772 | } else |
1a91de28 | 773 | mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", |
5984be90 JM |
774 | eqe->type, eqe->subtype, eq->eqn, |
775 | eq->cons_index, eqe->owner, eq->nent, | |
776 | eqe->slave_id, | |
777 | !!(eqe->owner & 0x80) ^ | |
778 | !!(eq->cons_index & eq->nent) ? "HW" : "SW"); | |
779 | ||
780 | break; | |
781 | ||
00f5ce99 JM |
782 | case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: |
783 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, | |
784 | (unsigned long) eqe); | |
785 | break; | |
786 | ||
be6a6b43 JM |
787 | case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: |
788 | switch (eqe->subtype) { | |
789 | case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE: | |
790 | mlx4_warn(dev, "Bad cable detected on port %u\n", | |
791 | eqe->event.bad_cable.port); | |
792 | break; | |
793 | case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE: | |
794 | mlx4_warn(dev, "Unsupported cable detected\n"); | |
795 | break; | |
796 | default: | |
797 | mlx4_dbg(dev, | |
798 | "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n", | |
799 | eqe->type, eqe->subtype, eq->eqn, | |
800 | eq->cons_index, eqe->owner, eq->nent, | |
801 | !!(eqe->owner & 0x80) ^ | |
802 | !!(eq->cons_index & eq->nent) ? "HW" : "SW"); | |
803 | break; | |
804 | } | |
805 | break; | |
806 | ||
225c7b1f RD |
807 | case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: |
808 | case MLX4_EVENT_TYPE_ECC_DETECT: | |
809 | default: | |
1a91de28 | 810 | mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", |
acba2420 JM |
811 | eqe->type, eqe->subtype, eq->eqn, |
812 | eq->cons_index, eqe->owner, eq->nent, | |
813 | eqe->slave_id, | |
814 | !!(eqe->owner & 0x80) ^ | |
815 | !!(eq->cons_index & eq->nent) ? "HW" : "SW"); | |
225c7b1f | 816 | break; |
acba2420 | 817 | } |
225c7b1f RD |
818 | |
819 | ++eq->cons_index; | |
820 | eqes_found = 1; | |
821 | ++set_ci; | |
822 | ||
823 | /* | |
824 | * The HCA will think the queue has overflowed if we | |
825 | * don't tell it we've been processing events. We | |
826 | * create our EQs with MLX4_NUM_SPARE_EQE extra | |
827 | * entries, so we must update our consumer index at | |
828 | * least that often. | |
829 | */ | |
830 | if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { | |
225c7b1f RD |
831 | eq_set_ci(eq, 0); |
832 | set_ci = 0; | |
833 | } | |
834 | } | |
835 | ||
836 | eq_set_ci(eq, 1); | |
837 | ||
3dca0f42 MB |
838 | /* cqn is 24 bits wide but is initialized such that its higher bits |
839 | * are ones too. Thus, if we got any event, cqn's high bits should be off | |
840 | * and we need to schedule the tasklet. | |
841 | */ | |
842 | if (!(cqn & ~0xffffff)) | |
843 | tasklet_schedule(&eq->tasklet_ctx.task); | |
844 | ||
225c7b1f RD |
845 | return eqes_found; |
846 | } | |
847 | ||
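The tasklet-scheduling test at the end of mlx4_eq_int() relies on cqn starting at -1: in two's complement every bit is set, so `cqn & ~0xffffff` stays nonzero until a completion event stores a genuine 24-bit CQN. A standalone check of that sentinel logic (illustrative values):

```c
#include <stdio.h>

int main(void)
{
	int cqn = -1;		/* sentinel: no completion event seen yet */

	printf("%d\n", !(cqn & ~0xffffff));	/* 0: skip the tasklet */

	cqn = 0x12345;		/* a real 24-bit CQN from a completion EQE */
	printf("%d\n", !(cqn & ~0xffffff));	/* 1: schedule the tasklet */
	return 0;
}
```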
848 | static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) | |
849 | { | |
850 | struct mlx4_dev *dev = dev_ptr; | |
851 | struct mlx4_priv *priv = mlx4_priv(dev); | |
852 | int work = 0; | |
853 | int i; | |
854 | ||
855 | writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); | |
856 | ||
b8dd786f | 857 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
225c7b1f RD |
858 | work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); |
859 | ||
860 | return IRQ_RETVAL(work); | |
861 | } | |
862 | ||
863 | static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) | |
864 | { | |
865 | struct mlx4_eq *eq = eq_ptr; | |
866 | struct mlx4_dev *dev = eq->dev; | |
867 | ||
868 | mlx4_eq_int(dev, eq); | |
869 | ||
870 | /* MSI-X vectors always belong to us */ | |
871 | return IRQ_HANDLED; | |
872 | } | |
873 | ||
acba2420 JM |
874 | int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, |
875 | struct mlx4_vhcr *vhcr, | |
876 | struct mlx4_cmd_mailbox *inbox, | |
877 | struct mlx4_cmd_mailbox *outbox, | |
878 | struct mlx4_cmd_info *cmd) | |
879 | { | |
880 | struct mlx4_priv *priv = mlx4_priv(dev); | |
881 | struct mlx4_slave_event_eq_info *event_eq = | |
803143fb | 882 | priv->mfunc.master.slave_state[slave].event_eq; |
acba2420 | 883 | u32 in_modifier = vhcr->in_modifier; |
c101c81b | 884 | u32 eqn = in_modifier & 0x3FF; |
acba2420 JM |
885 | u64 in_param = vhcr->in_param; |
886 | int err = 0; | |
803143fb | 887 | int i; |
acba2420 JM |
888 | |
889 | if (slave == dev->caps.function) | |
890 | err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, | |
891 | 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, | |
892 | MLX4_CMD_NATIVE); | |
803143fb MA |
893 | if (!err) |
894 | for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) | |
895 | if (in_param & (1LL << i)) | |
896 | event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn; | |
897 | ||
acba2420 JM |
898 | return err; |
899 | } | |
900 | ||
225c7b1f RD |
901 | static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, |
902 | int eq_num) | |
903 | { | |
904 | return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, | |
f9baff50 JM |
905 | 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, |
906 | MLX4_CMD_WRAPPED); | |
225c7b1f RD |
907 | } |
908 | ||
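Both mlx4_MAP_EQ() and the wrapper above pack the MAP_EQ input modifier the same way: bit 31 selects unmap versus map and the low bits carry the EQ number (the wrapper extracts ten of them with the 0x3FF mask). A userspace sketch of the encode/decode round trip (illustrative values only):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eq_num = 5;
	int unmap = 1;

	/* Encode as mlx4_MAP_EQ() does: bit 31 = unmap, low bits = EQN. */
	uint32_t in_modifier = ((uint32_t)unmap << 31) | eq_num;

	/* Decode as mlx4_MAP_EQ_wrapper() does. */
	uint32_t eqn = in_modifier & 0x3FF;
	int is_unmap = in_modifier >> 31;

	printf("modifier=0x%08x eqn=%u unmap=%d\n",
	       (unsigned)in_modifier, (unsigned)eqn, is_unmap);
	return 0;
}
```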
909 | static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | |
910 | int eq_num) | |
911 | { | |
eb41049f | 912 | return mlx4_cmd(dev, mailbox->dma, eq_num, 0, |
acba2420 | 913 | MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, |
f9baff50 | 914 | MLX4_CMD_WRAPPED); |
225c7b1f RD |
915 | } |
916 | ||
30a5da5b | 917 | static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num) |
225c7b1f | 918 | { |
30a5da5b JM |
919 | return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ, |
920 | MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); | |
225c7b1f RD |
921 | } |
922 | ||
b8dd786f YP |
923 | static int mlx4_num_eq_uar(struct mlx4_dev *dev) |
924 | { | |
925 | /* | |
926 | * Each UAR holds 4 EQ doorbells. To figure out how many UARs | |
927 | * we need to map, take the difference of highest index and | |
928 | * the lowest index we'll use and add 1. | |
929 | */ | |
c66fa19c MB |
930 | return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - |
931 | dev->caps.reserved_eqs / 4 + 1; | |
b8dd786f YP |
932 | } |
933 | ||
3d73c288 | 934 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) |
225c7b1f RD |
935 | { |
936 | struct mlx4_priv *priv = mlx4_priv(dev); | |
937 | int index; | |
938 | ||
939 | index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; | |
940 | ||
941 | if (!priv->eq_table.uar_map[index]) { | |
942 | priv->eq_table.uar_map[index] = | |
85743f1e HN |
943 | ioremap( |
944 | pci_resource_start(dev->persist->pdev, 2) + | |
945 | ((eq->eqn / 4) << (dev->uar_page_shift)), | |
946 | (1 << (dev->uar_page_shift))); | |
225c7b1f RD |
947 | if (!priv->eq_table.uar_map[index]) { |
948 | mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", | |
949 | eq->eqn); | |
950 | return NULL; | |
951 | } | |
952 | } | |
953 | ||
954 | return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); | |
955 | } | |
956 | ||
bfc0d8c3 DB |
957 | static void mlx4_unmap_uar(struct mlx4_dev *dev) |
958 | { | |
959 | struct mlx4_priv *priv = mlx4_priv(dev); | |
960 | int i; | |
961 | ||
962 | for (i = 0; i < mlx4_num_eq_uar(dev); ++i) | |
963 | if (priv->eq_table.uar_map[i]) { | |
964 | iounmap(priv->eq_table.uar_map[i]); | |
965 | priv->eq_table.uar_map[i] = NULL; | |
966 | } | |
967 | } | |
968 | ||
3d73c288 RD |
969 | static int mlx4_create_eq(struct mlx4_dev *dev, int nent, |
970 | u8 intr, struct mlx4_eq *eq) | |
225c7b1f RD |
971 | { |
972 | struct mlx4_priv *priv = mlx4_priv(dev); | |
973 | struct mlx4_cmd_mailbox *mailbox; | |
974 | struct mlx4_eq_context *eq_context; | |
975 | int npages; | |
976 | u64 *dma_list = NULL; | |
977 | dma_addr_t t; | |
978 | u64 mtt_addr; | |
979 | int err = -ENOMEM; | |
980 | int i; | |
981 | ||
982 | eq->dev = dev; | |
983 | eq->nent = roundup_pow_of_two(max(nent, 2)); | |
43c816c6 IS |
984 | /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with |
985 | * strides of 64B, 128B and 256B. |
986 | */ | |
987 | npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; | |
225c7b1f RD |
988 | |
989 | eq->page_list = kmalloc(npages * sizeof *eq->page_list, | |
990 | GFP_KERNEL); | |
991 | if (!eq->page_list) | |
992 | goto err_out; | |
993 | ||
994 | for (i = 0; i < npages; ++i) | |
995 | eq->page_list[i].buf = NULL; | |
996 | ||
997 | dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); | |
998 | if (!dma_list) | |
999 | goto err_out_free; | |
1000 | ||
1001 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
1002 | if (IS_ERR(mailbox)) | |
1003 | goto err_out_free; | |
1004 | eq_context = mailbox->buf; | |
1005 | ||
1006 | for (i = 0; i < npages; ++i) { | |
872bf2fb YH |
1007 | eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> |
1008 | pdev->dev, | |
1009 | PAGE_SIZE, &t, | |
1010 | GFP_KERNEL); | |
225c7b1f RD |
1011 | if (!eq->page_list[i].buf) |
1012 | goto err_out_free_pages; | |
1013 | ||
1014 | dma_list[i] = t; | |
1015 | eq->page_list[i].map = t; | |
1016 | ||
1017 | memset(eq->page_list[i].buf, 0, PAGE_SIZE); | |
1018 | } | |
1019 | ||
1020 | eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); | |
1021 | if (eq->eqn == -1) | |
1022 | goto err_out_free_pages; | |
1023 | ||
1024 | eq->doorbell = mlx4_get_eq_uar(dev, eq); | |
1025 | if (!eq->doorbell) { | |
1026 | err = -ENOMEM; | |
1027 | goto err_out_free_eq; | |
1028 | } | |
1029 | ||
1030 | err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); | |
1031 | if (err) | |
1032 | goto err_out_free_eq; | |
1033 | ||
1034 | err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); | |
1035 | if (err) | |
1036 | goto err_out_free_mtt; | |
1037 | ||
225c7b1f RD |
1038 | eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | |
1039 | MLX4_EQ_STATE_ARMED); | |
1040 | eq_context->log_eq_size = ilog2(eq->nent); | |
1041 | eq_context->intr = intr; | |
1042 | eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; | |
1043 | ||
1044 | mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); | |
1045 | eq_context->mtt_base_addr_h = mtt_addr >> 32; | |
1046 | eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | |
1047 | ||
1048 | err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); | |
1049 | if (err) { | |
1050 | mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); | |
1051 | goto err_out_free_mtt; | |
1052 | } | |
1053 | ||
1054 | kfree(dma_list); | |
1055 | mlx4_free_cmd_mailbox(dev, mailbox); | |
1056 | ||
1057 | eq->cons_index = 0; | |
1058 | ||
3dca0f42 MB |
1059 | INIT_LIST_HEAD(&eq->tasklet_ctx.list); |
1060 | INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); | |
1061 | spin_lock_init(&eq->tasklet_ctx.lock); | |
1062 | tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb, | |
1063 | (unsigned long)&eq->tasklet_ctx); | |
1064 | ||
225c7b1f RD |
1065 | return err; |
1066 | ||
1067 | err_out_free_mtt: | |
1068 | mlx4_mtt_cleanup(dev, &eq->mtt); | |
1069 | ||
1070 | err_out_free_eq: | |
7c6d74d2 | 1071 | mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); |
225c7b1f RD |
1072 | |
1073 | err_out_free_pages: | |
1074 | for (i = 0; i < npages; ++i) | |
1075 | if (eq->page_list[i].buf) | |
872bf2fb | 1076 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
225c7b1f RD |
1077 | eq->page_list[i].buf, |
1078 | eq->page_list[i].map); | |
1079 | ||
1080 | mlx4_free_cmd_mailbox(dev, mailbox); | |
1081 | ||
1082 | err_out_free: | |
1083 | kfree(eq->page_list); | |
1084 | kfree(dma_list); | |
1085 | ||
1086 | err_out: | |
1087 | return err; | |
1088 | } | |
1089 | ||
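mlx4_create_eq() above rounds the requested entry count up to a power of two (minimum 2), multiplies by dev->caps.eqe_size, and backs the result with whole pages. For example, a request for 500 entries of 32 bytes becomes 512 entries, i.e. 16 KB, i.e. 4 pages of 4 KB. A sketch of that arithmetic (assuming 4 KB pages and 32-byte EQEs, with minimal stand-ins for the kernel helpers):

```c
#include <stdio.h>

/* Minimal stand-ins for the kernel helpers used by mlx4_create_eq(). */
#define PAGE_SIZE	4096
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int nent = 500, eqe_size = 32;	/* illustrative request */

	unsigned int entries = roundup_pow_of_two(nent > 2 ? nent : 2);
	unsigned int npages = PAGE_ALIGN(entries * eqe_size) / PAGE_SIZE;

	printf("entries=%u npages=%u\n", entries, npages);	/* 512, 4 */
	return 0;
}
```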
1090 | static void mlx4_free_eq(struct mlx4_dev *dev, | |
1091 | struct mlx4_eq *eq) | |
1092 | { | |
1093 | struct mlx4_priv *priv = mlx4_priv(dev); | |
225c7b1f | 1094 | int err; |
225c7b1f | 1095 | int i; |
43c816c6 IS |
1096 | /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with |
1097 | * strides of 64B, 128B and 256B. |
1098 | */ | |
1099 | int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; | |
225c7b1f | 1100 | |
30a5da5b | 1101 | err = mlx4_HW2SW_EQ(dev, eq->eqn); |
225c7b1f RD |
1102 | if (err) |
1103 | mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); | |
1104 | ||
bf1bac5b | 1105 | synchronize_irq(eq->irq); |
3dca0f42 | 1106 | tasklet_disable(&eq->tasklet_ctx.task); |
225c7b1f RD |
1107 | |
1108 | mlx4_mtt_cleanup(dev, &eq->mtt); | |
1109 | for (i = 0; i < npages; ++i) | |
872bf2fb YH |
1110 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
1111 | eq->page_list[i].buf, | |
1112 | eq->page_list[i].map); | |
225c7b1f RD |
1113 | |
1114 | kfree(eq->page_list); | |
7c6d74d2 | 1115 | mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); |
225c7b1f RD |
1116 | } |
1117 | ||
1118 | static void mlx4_free_irqs(struct mlx4_dev *dev) | |
1119 | { | |
1120 | struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; | |
c66fa19c | 1121 | int i; |
225c7b1f RD |
1122 | |
1123 | if (eq_table->have_irq) | |
872bf2fb | 1124 | free_irq(dev->persist->pdev->irq, dev); |
0b7ca5a9 | 1125 | |
b8dd786f | 1126 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
d1fdf24b | 1127 | if (eq_table->eq[i].have_irq) { |
de161803 IS |
1128 | free_cpumask_var(eq_table->eq[i].affinity_mask); |
1129 | #if defined(CONFIG_SMP) | |
1130 | irq_set_affinity_hint(eq_table->eq[i].irq, NULL); | |
1131 | #endif | |
225c7b1f | 1132 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); |
d1fdf24b RD |
1133 | eq_table->eq[i].have_irq = 0; |
1134 | } | |
b8dd786f YP |
1135 | |
1136 | kfree(eq_table->irq_names); | |
225c7b1f RD |
1137 | } |
1138 | ||
3d73c288 | 1139 | static int mlx4_map_clr_int(struct mlx4_dev *dev) |
225c7b1f RD |
1140 | { |
1141 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1142 | ||
872bf2fb YH |
1143 | priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev, |
1144 | priv->fw.clr_int_bar) + | |
225c7b1f RD |
1145 | priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); |
1146 | if (!priv->clr_base) { | |
1a91de28 | 1147 | mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); |
225c7b1f RD |
1148 | return -ENOMEM; |
1149 | } | |
1150 | ||
1151 | return 0; | |
1152 | } | |
1153 | ||
1154 | static void mlx4_unmap_clr_int(struct mlx4_dev *dev) | |
1155 | { | |
1156 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1157 | ||
1158 | iounmap(priv->clr_base); | |
1159 | } | |
1160 | ||
b8dd786f YP |
1161 | int mlx4_alloc_eq_table(struct mlx4_dev *dev) |
1162 | { | |
1163 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1164 | ||
1165 | priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, | |
1166 | sizeof *priv->eq_table.eq, GFP_KERNEL); | |
1167 | if (!priv->eq_table.eq) | |
1168 | return -ENOMEM; | |
1169 | ||
1170 | return 0; | |
1171 | } | |
1172 | ||
1173 | void mlx4_free_eq_table(struct mlx4_dev *dev) | |
1174 | { | |
1175 | kfree(mlx4_priv(dev)->eq_table.eq); | |
1176 | } | |
1177 | ||
3d73c288 | 1178 | int mlx4_init_eq_table(struct mlx4_dev *dev) |
225c7b1f RD |
1179 | { |
1180 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1181 | int err; | |
1182 | int i; | |
1183 | ||
758ff235 AL |
1184 | priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), |
1185 | sizeof *priv->eq_table.uar_map, | |
1186 | GFP_KERNEL); | |
b8dd786f YP |
1187 | if (!priv->eq_table.uar_map) { |
1188 | err = -ENOMEM; | |
1189 | goto err_out_free; | |
1190 | } | |
1191 | ||
7ae0e400 MB |
1192 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, |
1193 | roundup_pow_of_two(dev->caps.num_eqs), | |
1194 | dev->caps.num_eqs - 1, | |
1195 | dev->caps.reserved_eqs, | |
1196 | roundup_pow_of_two(dev->caps.num_eqs) - | |
1197 | dev->caps.num_eqs); | |
225c7b1f | 1198 | if (err) |
b8dd786f | 1199 | goto err_out_free; |
225c7b1f | 1200 | |
b8dd786f | 1201 | for (i = 0; i < mlx4_num_eq_uar(dev); ++i) |
225c7b1f RD |
1202 | priv->eq_table.uar_map[i] = NULL; |
1203 | ||
acba2420 JM |
1204 | if (!mlx4_is_slave(dev)) { |
1205 | err = mlx4_map_clr_int(dev); | |
1206 | if (err) | |
1207 | goto err_out_bitmap; | |
225c7b1f | 1208 | |
acba2420 JM |
1209 | priv->eq_table.clr_mask = |
1210 | swab32(1 << (priv->eq_table.inta_pin & 31)); | |
1211 | priv->eq_table.clr_int = priv->clr_base + | |
1212 | (priv->eq_table.inta_pin < 32 ? 4 : 0); | |
1213 | } | |
225c7b1f | 1214 | |
f5f5951c | 1215 | priv->eq_table.irq_names = |
c66fa19c | 1216 | kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), |
f5f5951c | 1217 | GFP_KERNEL); |
b8dd786f YP |
1218 | if (!priv->eq_table.irq_names) { |
1219 | err = -ENOMEM; | |
c66fa19c | 1220 | goto err_out_clr_int; |
b8dd786f YP |
1221 | } |
1222 | ||
c66fa19c MB |
1223 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { |
1224 | if (i == MLX4_EQ_ASYNC) { | |
1225 | err = mlx4_create_eq(dev, | |
1226 | MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, | |
1227 | 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]); | |
1228 | } else { | |
c66fa19c | 1229 | struct mlx4_eq *eq = &priv->eq_table.eq[i]; |
db9777e3 | 1230 | #ifdef CONFIG_RFS_ACCEL |
c66fa19c MB |
1231 | int port = find_first_bit(eq->actv_ports.ports, |
1232 | dev->caps.num_ports) + 1; | |
1233 | ||
1234 | if (port <= dev->caps.num_ports) { | |
1235 | struct mlx4_port_info *info = | |
1236 | &mlx4_priv(dev)->port[port]; | |
1237 | ||
1238 | if (!info->rmap) { | |
1239 | info->rmap = alloc_irq_cpu_rmap( | |
1240 | mlx4_get_eqs_per_port(dev, port)); | |
1241 | if (!info->rmap) { | |
1242 | mlx4_warn(dev, "Failed to allocate cpu rmap\n"); | |
1243 | err = -ENOMEM; | |
1244 | goto err_out_unmap; | |
1245 | } | |
1246 | } | |
0b7ca5a9 | 1247 | |
c66fa19c MB |
1248 | err = irq_cpu_rmap_add( |
1249 | info->rmap, eq->irq); | |
1250 | if (err) | |
1251 | mlx4_warn(dev, "Failed adding irq rmap\n"); | |
1252 | } | |
1253 | #endif | |
1254 | err = mlx4_create_eq(dev, dev->caps.num_cqs - | |
1255 | dev->caps.reserved_cqs + | |
1256 | MLX4_NUM_SPARE_EQE, | |
1257 | (dev->flags & MLX4_FLAG_MSI_X) ? | |
1258 | i + 1 - !!(i > MLX4_EQ_ASYNC) : 0, | |
1259 | eq); | |
0b7ca5a9 | 1260 | } |
c66fa19c MB |
1261 | if (err) |
1262 | goto err_out_unmap; | |
0b7ca5a9 YP |
1263 | } |
1264 | ||
225c7b1f | 1265 | if (dev->flags & MLX4_FLAG_MSI_X) { |
b8dd786f YP |
1266 | const char *eq_name; |
1267 | ||
c66fa19c MB |
1268 | snprintf(priv->eq_table.irq_names + |
1269 | MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE, | |
1270 | MLX4_IRQNAME_SIZE, | |
1271 | "mlx4-async@pci:%s", | |
1272 | pci_name(dev->persist->pdev)); | |
1273 | eq_name = priv->eq_table.irq_names + | |
1274 | MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE; | |
225c7b1f | 1275 | |
c66fa19c MB |
1276 | err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq, |
1277 | mlx4_msi_x_interrupt, 0, eq_name, | |
1278 | priv->eq_table.eq + MLX4_EQ_ASYNC); | |
1279 | if (err) | |
1280 | goto err_out_unmap; | |
225c7b1f | 1281 | |
c66fa19c | 1282 | priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1; |
225c7b1f | 1283 | } else { |
f5f5951c AB |
1284 | snprintf(priv->eq_table.irq_names, |
1285 | MLX4_IRQNAME_SIZE, | |
1286 | DRV_NAME "@pci:%s", | |
872bf2fb YH |
1287 | pci_name(dev->persist->pdev)); |
1288 | err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, | |
f5f5951c | 1289 | IRQF_SHARED, priv->eq_table.irq_names, dev); |
225c7b1f | 1290 | if (err) |
c66fa19c | 1291 | goto err_out_unmap; |
225c7b1f RD |
1292 | |
1293 | priv->eq_table.have_irq = 1; | |
1294 | } | |
1295 | ||
00f5ce99 | 1296 | err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
c66fa19c | 1297 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); |
225c7b1f RD |
1298 | if (err) |
1299 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", | |
c66fa19c | 1300 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); |
225c7b1f | 1301 | |
c66fa19c MB |
1302 | /* arm ASYNC eq */ |
1303 | eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1); | |
225c7b1f | 1304 | |
225c7b1f RD |
1305 | return 0; |
1306 | ||
225c7b1f | 1307 | err_out_unmap: |
c66fa19c MB |
1308 | while (i >= 0) |
1309 | mlx4_free_eq(dev, &priv->eq_table.eq[i--]); | |
1310 | #ifdef CONFIG_RFS_ACCEL | |
1311 | for (i = 1; i <= dev->caps.num_ports; i++) { | |
1312 | if (mlx4_priv(dev)->port[i].rmap) { | |
1313 | free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); | |
1314 | mlx4_priv(dev)->port[i].rmap = NULL; | |
1315 | } | |
b8dd786f | 1316 | } |
c66fa19c MB |
1317 | #endif |
1318 | mlx4_free_irqs(dev); | |
1319 | ||
1320 | err_out_clr_int: | |
acba2420 JM |
1321 | if (!mlx4_is_slave(dev)) |
1322 | mlx4_unmap_clr_int(dev); | |
225c7b1f | 1323 | |
b8dd786f | 1324 | err_out_bitmap: |
bfc0d8c3 | 1325 | mlx4_unmap_uar(dev); |
225c7b1f | 1326 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); |
b8dd786f YP |
1327 | |
1328 | err_out_free: | |
1329 | kfree(priv->eq_table.uar_map); | |
1330 | ||
225c7b1f RD |
1331 | return err; |
1332 | } | |
1333 | ||
1334 | void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | |
1335 | { | |
1336 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1337 | int i; | |
1338 | ||
00f5ce99 | 1339 | mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, |
c66fa19c | 1340 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); |
225c7b1f | 1341 | |
c66fa19c MB |
1342 | #ifdef CONFIG_RFS_ACCEL |
1343 | for (i = 1; i <= dev->caps.num_ports; i++) { | |
1344 | if (mlx4_priv(dev)->port[i].rmap) { | |
1345 | free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); | |
1346 | mlx4_priv(dev)->port[i].rmap = NULL; | |
1347 | } | |
1348 | } | |
1349 | #endif | |
225c7b1f RD |
1350 | mlx4_free_irqs(dev); |
1351 | ||
c66fa19c | 1352 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
225c7b1f | 1353 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); |
225c7b1f | 1354 | |
acba2420 JM |
1355 | if (!mlx4_is_slave(dev)) |
1356 | mlx4_unmap_clr_int(dev); | |
225c7b1f | 1357 | |
bfc0d8c3 | 1358 | mlx4_unmap_uar(dev); |
225c7b1f | 1359 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); |
b8dd786f YP |
1360 | |
1361 | kfree(priv->eq_table.uar_map); | |
225c7b1f | 1362 | } |
e7c1c2c4 YP |
1363 | |
1364 | /* A test that verifies that we can accept interrupts on all | |
1365 | * the irq vectors of the device. | |
1366 | * Interrupts are checked using the NOP command. | |
1367 | */ | |
1368 | int mlx4_test_interrupts(struct mlx4_dev *dev) | |
1369 | { | |
1370 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1371 | int i; | |
1372 | int err; | |
1373 | ||
1374 | err = mlx4_NOP(dev); | |
1375 | /* When not in MSI_X, there is only one irq to check */ | |
acba2420 | 1376 | if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) |
e7c1c2c4 YP |
1377 | return err; |
1378 | ||
1379 | /* Loop over all completion vectors; for each vector, check |
1380 | * whether it works by mapping command completions to that vector |
1381 | * and performing a NOP command. |
1382 | */ | |
1383 | for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { |
820d39f3 CS |
1384 | /* Make sure request_irq was called */ |
1385 | if (!priv->eq_table.eq[i].have_irq) | |
1386 | continue; | |
1387 | ||
e7c1c2c4 YP |
1388 | /* Temporary use polling for command completions */ |
1389 | mlx4_cmd_use_polling(dev); | |
1390 | ||
b3834be5 | 1391 | /* Map the new eq to handle all asynchronous events */ |
00f5ce99 | 1392 | err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
e7c1c2c4 YP |
1393 | priv->eq_table.eq[i].eqn); |
1394 | if (err) { | |
1395 | mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); | |
1396 | mlx4_cmd_use_events(dev); | |
1397 | break; | |
1398 | } | |
1399 | ||
1400 | /* Go back to using events */ | |
1401 | mlx4_cmd_use_events(dev); | |
1402 | err = mlx4_NOP(dev); | |
1403 | } | |
1404 | ||
1405 | /* Return to default */ | |
00f5ce99 | 1406 | mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
c66fa19c | 1407 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); |
e7c1c2c4 YP |
1408 | return err; |
1409 | } | |
1410 | EXPORT_SYMBOL(mlx4_test_interrupts); | |
0b7ca5a9 | 1411 | |
c66fa19c | 1412 | bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) |
0b7ca5a9 | 1413 | { |
c66fa19c | 1414 | struct mlx4_priv *priv = mlx4_priv(dev); |
0b7ca5a9 | 1415 | |
c66fa19c MB |
1416 | vector = MLX4_CQ_TO_EQ_VECTOR(vector); |
1417 | if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) || | |
1418 | (vector == MLX4_EQ_ASYNC)) | |
1419 | return false; | |
1420 | ||
1421 | return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports); | |
1422 | } | |
1423 | EXPORT_SYMBOL(mlx4_is_eq_vector_valid); | |
1424 | ||
1425 | u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port) | |
1426 | { | |
1427 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1428 | unsigned int i; | |
1429 | unsigned int sum = 0; | |
1430 | ||
1431 | for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) | |
1432 | sum += !!test_bit(port - 1, | |
1433 | priv->eq_table.eq[i].actv_ports.ports); | |
1434 | ||
1435 | return sum; | |
1436 | } | |
1437 | EXPORT_SYMBOL(mlx4_get_eqs_per_port); | |
1438 | ||
1439 | int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector) | |
1440 | { | |
1441 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1442 | ||
1443 | vector = MLX4_CQ_TO_EQ_VECTOR(vector); | |
1444 | if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1)) | |
1445 | return -EINVAL; | |
1446 | ||
1447 | return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports, | |
1448 | dev->caps.num_ports) > 1); | |
1449 | } | |
1450 | EXPORT_SYMBOL(mlx4_is_eq_shared); | |
1451 | ||
1452 | struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port) | |
1453 | { | |
1454 | return mlx4_priv(dev)->port[port].rmap; | |
1455 | } | |
1456 | EXPORT_SYMBOL(mlx4_get_cpu_rmap); | |
1457 | ||
1458 | int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector) | |
1459 | { | |
0b7ca5a9 | 1460 | struct mlx4_priv *priv = mlx4_priv(dev); |
c66fa19c MB |
1461 | int err = 0, i = 0; |
1462 | u32 min_ref_count_val = (u32)-1; | |
1463 | int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector); | |
1464 | int *prequested_vector = NULL; | |
1465 | ||
0b7ca5a9 | 1466 | |
730c41d5 | 1467 | mutex_lock(&priv->msix_ctl.pool_lock); |
c66fa19c MB |
1468 | if (requested_vector < (dev->caps.num_comp_vectors + 1) && |
1469 | (requested_vector >= 0) && | |
1470 | (requested_vector != MLX4_EQ_ASYNC)) { | |
1471 | if (test_bit(port - 1, | |
1472 | priv->eq_table.eq[requested_vector].actv_ports.ports)) { | |
1473 | prequested_vector = &requested_vector; | |
1474 | } else { | |
1475 | struct mlx4_eq *eq; | |
1476 | ||
1477 | for (i = 1; i < port; | |
1478 | requested_vector += mlx4_get_eqs_per_port(dev, i++)) | |
1479 | ; | |
1480 | ||
1481 | eq = &priv->eq_table.eq[requested_vector]; | |
1482 | if (requested_vector < dev->caps.num_comp_vectors + 1 && | |
1483 | test_bit(port - 1, eq->actv_ports.ports)) { | |
1484 | prequested_vector = &requested_vector; | |
d9236c3f | 1485 | } |
c66fa19c MB |
1486 | } |
1487 | } | |
1488 | ||
1489 | if (!prequested_vector) { | |
1490 | requested_vector = -1; | |
1491 | for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1; | |
1492 | i++) { | |
1493 | struct mlx4_eq *eq = &priv->eq_table.eq[i]; | |
1494 | ||
1495 | if (min_ref_count_val > eq->ref_count && | |
1496 | test_bit(port - 1, eq->actv_ports.ports)) { | |
1497 | min_ref_count_val = eq->ref_count; | |
1498 | requested_vector = i; | |
0b7ca5a9 | 1499 | } |
c66fa19c | 1500 | } |
2eacc23c | 1501 | |
c66fa19c MB |
1502 | if (requested_vector < 0) { |
1503 | err = -ENOSPC; | |
1504 | goto err_unlock; | |
0b7ca5a9 | 1505 | } |
c66fa19c MB |
1506 | |
1507 | prequested_vector = &requested_vector; | |
0b7ca5a9 | 1508 | } |
c66fa19c MB |
1509 | |
1510 | if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) && | |
1511 | dev->flags & MLX4_FLAG_MSI_X) { | |
1512 | set_bit(*prequested_vector, priv->msix_ctl.pool_bm); | |
1513 | snprintf(priv->eq_table.irq_names + | |
1514 | *prequested_vector * MLX4_IRQNAME_SIZE, | |
1515 | MLX4_IRQNAME_SIZE, "mlx4-%d@%s", | |
1516 | *prequested_vector, dev_name(&dev->persist->pdev->dev)); | |
1517 | ||
1518 | err = request_irq(priv->eq_table.eq[*prequested_vector].irq, | |
1519 | mlx4_msi_x_interrupt, 0, | |
1520 | &priv->eq_table.irq_names[*prequested_vector << 5], | |
1521 | priv->eq_table.eq + *prequested_vector); | |
1522 | ||
1523 | if (err) { | |
1524 | clear_bit(*prequested_vector, priv->msix_ctl.pool_bm); | |
1525 | *prequested_vector = -1; | |
1526 | } else { | |
de161803 IS |
1527 | #if defined(CONFIG_SMP) |
1528 | mlx4_set_eq_affinity_hint(priv, *prequested_vector); | |
1529 | #endif | |
c66fa19c MB |
1530 | eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1); |
1531 | priv->eq_table.eq[*prequested_vector].have_irq = 1; | |
1532 | } | |
1533 | } | |
1534 | ||
1535 | if (!err && *prequested_vector >= 0) | |
1536 | priv->eq_table.eq[*prequested_vector].ref_count++; | |
1537 | ||
1538 | err_unlock: | |
730c41d5 | 1539 | mutex_unlock(&priv->msix_ctl.pool_lock); |
0b7ca5a9 | 1540 | |
c66fa19c MB |
1541 | if (!err && *prequested_vector >= 0) |
1542 | *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector); | |
1543 | else | |
0b7ca5a9 | 1544 | *vector = 0; |
c66fa19c | 1545 | |
0b7ca5a9 YP |
1546 | return err; |
1547 | } | |
1548 | EXPORT_SYMBOL(mlx4_assign_eq); | |
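A consumer drives this pool in three steps: pass a vector of 0 (or a previously obtained CQ vector) to mlx4_assign_eq(), which picks the least-referenced EQ serving the port and requests its IRQ on first use; bind the CQ to the returned vector; and drop the reference later with mlx4_release_eq(). A hedged caller sketch (not from the driver; error handling abbreviated, and treating an initial 0 as "pick for me" is an assumption based on the MLX4_EQ_ASYNC check above):

```c
/* Sketch of a consumer, assuming a valid struct mlx4_dev *dev and port. */
static int example_setup_cq_vector(struct mlx4_dev *dev, u8 port)
{
	int vector = 0;		/* let the pool choose a suitable EQ */
	int err;

	err = mlx4_assign_eq(dev, port, &vector);
	if (err)
		return err;	/* e.g. -ENOSPC: no EQ serves this port */

	/* ... create a CQ bound to 'vector' here ... */

	mlx4_release_eq(dev, vector);	/* drop the reference when done */
	return 0;
}
```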
1549 | ||
c66fa19c | 1550 | int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec) |
35f6f453 AV |
1551 | { |
1552 | struct mlx4_priv *priv = mlx4_priv(dev); | |
1553 | ||
c66fa19c | 1554 | return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq; |
35f6f453 AV |
1555 | } |
1556 | EXPORT_SYMBOL(mlx4_eq_get_irq); | |
1557 | ||
0b7ca5a9 YP |
1558 | void mlx4_release_eq(struct mlx4_dev *dev, int vec) |
1559 | { | |
1560 | struct mlx4_priv *priv = mlx4_priv(dev); | |
c66fa19c MB |
1561 | int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec); |
1562 | ||
1563 | mutex_lock(&priv->msix_ctl.pool_lock); | |
1564 | priv->eq_table.eq[eq_vec].ref_count--; | |
0b7ca5a9 | 1565 | |
c66fa19c MB |
1566 | /* once we have allocated an EQ, we don't release it, because it |
1567 | * might be bound to cpu_rmap. |
1568 | */ | |
1569 | mutex_unlock(&priv->msix_ctl.pool_lock); | |
0b7ca5a9 YP |
1570 | } |
1571 | EXPORT_SYMBOL(mlx4_release_eq); | |
1572 |