]>
Commit | Line | Data |
---|---|---|
e126ba97 | 1 | /* |
302bdf68 | 2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
e126ba97 EC |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <linux/interrupt.h> | |
34 | #include <linux/module.h> | |
35 | #include <linux/mlx5/driver.h> | |
36 | #include <linux/mlx5/cmd.h> | |
37 | #include "mlx5_core.h" | |
073bb189 SM |
38 | #ifdef CONFIG_MLX5_CORE_EN |
39 | #include "eswitch.h" | |
40 | #endif | |
e126ba97 EC |
41 | |
/* EQE geometry: every entry is a fixed-size struct mlx5_eqe; a freshly
 * initialized queue stamps each entry's owner bit with this value so
 * software can tell hardware-written entries apart (see next_eqe_sw()).
 */
enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

/* Hardware EQ arming states (values defined by the device interface). */
enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

/* Queue sizing.  MLX5_NUM_SPARE_EQE extra entries are added to every EQ
 * so the HCA never sees the queue as overflowed between consumer-index
 * updates (see the comment in mlx5_eq_int()).  MLX5_NUM_PF_DRAIN sizes
 * the page-fault mempool used by the ODP path.
 */
enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
	MLX5_NUM_PF_DRAIN	= 64,
};

/* Byte offset of the EQ doorbell registers within the UAR page. */
enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

/* Default async-event bitmask requested when the async EQ is created;
 * mlx5_start_eqs() ORs in additional capability-gated events.
 */
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

/* NOTE(review): map_eq_in and cre_des_eq are not referenced anywhere in
 * this file — presumably legacy command layouts superseded by the
 * MLX5_ST_SZ_*/MLX5_SET macros; confirm before removing.
 */
struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};
87 | ||
/* Issue the DESTROY_EQ firmware command for EQ number @eqn.
 * Returns 0 on success or the error from mlx5_cmd_exec().
 */
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
97 | ||
/* Return a pointer to EQ entry @entry inside the EQ buffer. */
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
102 | ||
/* Return the next EQE owned by software, or NULL if none is pending.
 * Ownership alternates each time the consumer index wraps the queue
 * (eq->nent is a power of two): the entry is software-owned when its
 * owner bit differs from the "wrap parity" bit of cons_index.
 */
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
109 | ||
110 | static const char *eqe_type_str(u8 type) | |
111 | { | |
112 | switch (type) { | |
113 | case MLX5_EVENT_TYPE_COMP: | |
114 | return "MLX5_EVENT_TYPE_COMP"; | |
115 | case MLX5_EVENT_TYPE_PATH_MIG: | |
116 | return "MLX5_EVENT_TYPE_PATH_MIG"; | |
117 | case MLX5_EVENT_TYPE_COMM_EST: | |
118 | return "MLX5_EVENT_TYPE_COMM_EST"; | |
119 | case MLX5_EVENT_TYPE_SQ_DRAINED: | |
120 | return "MLX5_EVENT_TYPE_SQ_DRAINED"; | |
121 | case MLX5_EVENT_TYPE_SRQ_LAST_WQE: | |
122 | return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; | |
123 | case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: | |
124 | return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; | |
125 | case MLX5_EVENT_TYPE_CQ_ERROR: | |
126 | return "MLX5_EVENT_TYPE_CQ_ERROR"; | |
127 | case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: | |
128 | return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; | |
129 | case MLX5_EVENT_TYPE_PATH_MIG_FAILED: | |
130 | return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; | |
131 | case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: | |
132 | return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; | |
133 | case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: | |
134 | return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; | |
135 | case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: | |
136 | return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; | |
137 | case MLX5_EVENT_TYPE_INTERNAL_ERROR: | |
138 | return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; | |
139 | case MLX5_EVENT_TYPE_PORT_CHANGE: | |
140 | return "MLX5_EVENT_TYPE_PORT_CHANGE"; | |
141 | case MLX5_EVENT_TYPE_GPIO_EVENT: | |
142 | return "MLX5_EVENT_TYPE_GPIO_EVENT"; | |
d4eb4cd7 HN |
143 | case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: |
144 | return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; | |
e126ba97 EC |
145 | case MLX5_EVENT_TYPE_REMOTE_CONFIG: |
146 | return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; | |
147 | case MLX5_EVENT_TYPE_DB_BF_CONGESTION: | |
148 | return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; | |
149 | case MLX5_EVENT_TYPE_STALL_EVENT: | |
150 | return "MLX5_EVENT_TYPE_STALL_EVENT"; | |
151 | case MLX5_EVENT_TYPE_CMD: | |
152 | return "MLX5_EVENT_TYPE_CMD"; | |
153 | case MLX5_EVENT_TYPE_PAGE_REQUEST: | |
154 | return "MLX5_EVENT_TYPE_PAGE_REQUEST"; | |
e420f0c0 HE |
155 | case MLX5_EVENT_TYPE_PAGE_FAULT: |
156 | return "MLX5_EVENT_TYPE_PAGE_FAULT"; | |
f9a1ef72 EE |
157 | case MLX5_EVENT_TYPE_PPS_EVENT: |
158 | return "MLX5_EVENT_TYPE_PPS_EVENT"; | |
e126ba97 EC |
159 | default: |
160 | return "Unrecognized event"; | |
161 | } | |
162 | } | |
163 | ||
/* Translate a PORT_CHANGE EQE sub-type into the driver-level
 * mlx5_dev_event delivered to dev->event().  Returns -1 (cast into the
 * enum) for subtypes not listed here; callers in this file only invoke
 * it for the subtypes handled below.
 */
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}
184 | ||
/* Publish the EQ consumer index to the hardware doorbell.  With @arm
 * set, the "arm" doorbell (offset 0) is written so the EQ fires the
 * next interrupt; otherwise the plain CI doorbell (offset 2 dwords) is
 * written, which only acknowledges consumed entries.
 */
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	/* Raw write: the value is already byte-swapped to big endian. */
	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
193 | ||
d9aaed83 AK |
194 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
/* Per-pagefault work item: dispatch one page fault to the ODP handler
 * and return the descriptor to the mempool it was drawn from in
 * eq_pf_process().
 */
static void eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_eq *eq = pfault->eq;

	mlx5_core_page_fault(eq->dev, pfault);
	mempool_free(pfault, eq->pf_ctx.pool);
}
205 | ||
/* Drain all software-owned EQEs from a page-fault EQ, converting each
 * into a struct mlx5_pagefault and queuing it on the ordered workqueue.
 * Runs with eq->pf_ctx.lock held (from IRQ or work context).  If the
 * mempool is exhausted, processing stops and eq_pf_action() is
 * scheduled to refill the pool and resume.
 */
static void eq_pf_process(struct mlx5_eq *eq)
{
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
		if (!pfault) {
			/* Pool empty: defer the rest to the workqueue. */
			schedule_work(&eq->pf_ctx.work);
			break;
		}

		/* Read EQE contents only after the ownership check. */
		dma_rmb();
		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_core_dbg(dev,
			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			      eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				MLX5_24BIT_MASK;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				      pfault->type, pfault->token,
				      pfault->rdma.r_key);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				      pfault->rdma.rdma_op_len,
				      pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				MLX5_24BIT_MASK;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				      pfault->type, pfault->token,
				      pfault->wqe.wq_num,
				      pfault->wqe.wqe_index);
			break;

		default:
			mlx5_core_warn(dev,
				       "Unsupported page fault event sub-type: 0x%02hhx\n",
				       eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, eqe_pf_action);
		queue_work(eq->pf_ctx.wq, &pfault->work);

		++eq->cons_index;
		++set_ci;

		/* Periodically publish CI so the HCA never sees overflow. */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	/* Final CI update with re-arm. */
	eq_update_ci(eq, 1);
}
300 | ||
/* IRQ handler for the page-fault EQ.  If the context lock is already
 * held (the deferred work is draining the queue), the work item is
 * scheduled instead of spinning in hard-IRQ context.
 */
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
		eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
	} else {
		schedule_work(&eq->pf_ctx.work);
	}

	return IRQ_HANDLED;
}
315 | ||
/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 * Chip workaround.
 */
static void mempool_refill(mempool_t *pool)
{
	/* alloc/free in GFP_KERNEL context tops the reserve back up to
	 * min_nr; mempool_free() returns elements to the reserve first.
	 */
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}
325 | ||
/* Deferred page-fault EQ drain: refill the pagefault mempool (allowed
 * to sleep here), then process the EQ under the context lock.
 * Scheduled by mlx5_eq_pf_int()/eq_pf_process() when the pool runs dry
 * or the lock is contended.
 */
static void eq_pf_action(struct work_struct *work)
{
	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);

	mempool_refill(eq->pf_ctx.pool);

	spin_lock_irq(&eq->pf_ctx.lock);
	eq_pf_process(eq);
	spin_unlock_irq(&eq->pf_ctx.lock);
}
336 | ||
/* Initialize a page-fault EQ context: lock, deferred-work item, an
 * ordered (single-threaded, reclaim-safe) workqueue named @name, and a
 * mempool of MLX5_NUM_PF_DRAIN pagefault descriptors.
 * Returns 0 on success or -ENOMEM, releasing the workqueue on the
 * partial-failure path.
 */
static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
{
	spin_lock_init(&pf_ctx->lock);
	INIT_WORK(&pf_ctx->work, eq_pf_action);

	pf_ctx->wq = alloc_ordered_workqueue(name,
					     WQ_MEM_RECLAIM);
	if (!pf_ctx->wq)
		return -ENOMEM;

	pf_ctx->pool = mempool_create_kmalloc_pool
		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
	if (!pf_ctx->pool)
		goto err_wq;

	return 0;
err_wq:
	destroy_workqueue(pf_ctx->wq);
	return -ENOMEM;
}
357 | ||
/* Tell firmware to resume a work queue that was stopped by a page
 * fault.  @token/@wq_num/@type identify the fault; @error non-zero
 * resolves the fault as failed.  Returns the mlx5_cmd_exec() status.
 */
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
374 | #endif | |
375 | ||
/* Main EQ interrupt handler (completion and async EQs): drains all
 * software-owned EQEs, dispatching each by type, then updates and
 * re-arms the consumer index.  If any completion EQE was seen, the CQ
 * tasklet is scheduled once after the drain.
 */
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;	/* -1 = no completion seen; gates tasklet_schedule */
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			/* QP/SQ/RQ events: resource number is the low 24
			 * bits, with the resource type folded in above
			 * MLX5_USER_INDEX_LEN bits.
			 */
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;
		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

#ifdef CONFIG_MLX5_CORE_EN
		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;
#endif

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			if (dev->event)
				dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
			break;
		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq->tasklet_ctx.task);

	return IRQ_HANDLED;
}
507 | ||
508 | static void init_eq_buf(struct mlx5_eq *eq) | |
509 | { | |
510 | struct mlx5_eqe *eqe; | |
511 | int i; | |
512 | ||
513 | for (i = 0; i < eq->nent; i++) { | |
514 | eqe = get_eqe(eq, i); | |
515 | eqe->owner = MLX5_EQE_OWNER_INIT_VAL; | |
516 | } | |
517 | } | |
518 | ||
/* Create an EQ in firmware and wire it up: allocate the EQE buffer,
 * issue CREATE_EQ with @mask as the event bitmask, request the MSI-X
 * interrupt for @vecidx, register debugfs, and initialize either the
 * page-fault context (MLX5_EQ_TYPE_PF, ODP builds only) or the CQ
 * tasklet context.  The queue is left armed.
 *
 * @nent is rounded up (plus MLX5_NUM_SPARE_EQE slack) to a power of
 * two.  On failure all acquired resources are released via the goto
 * chain.  Returns 0 or a negative errno.
 */
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type)
{
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	irq_handler_t handler;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	eq->type = type;
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

	/* Page-fault EQs get the dedicated handler on ODP builds. */
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF)
		handler = mlx5_eq_pf_int;
	else
#endif
		handler = mlx5_eq_int;

	init_eq_buf(eq);

	/* Command input: fixed layout plus one PAS entry per buffer page. */
	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = priv->msix_arr[vecidx].vector;
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF) {
		err = init_pf_ctx(&eq->pf_ctx, name);
		if (err)
			goto err_irq;
	} else
#endif
	{
		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);
	}

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
626 | ||
/* Tear down an EQ created by mlx5_create_map_eq(): remove debugfs,
 * free the IRQ, destroy the EQ in firmware, quiesce any in-flight
 * interrupt, stop the per-type deferred machinery (CQ tasklet or
 * page-fault workqueue/mempool), and free the EQE buffer.
 * Returns the DESTROY_EQ command status (teardown continues even if
 * the command fails, with a warning).
 */
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(eq->irqn, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	/* Ensure no handler is still running before tearing down contexts. */
	synchronize_irq(eq->irqn);

	if (eq->type == MLX5_EQ_TYPE_COMP) {
		tasklet_disable(&eq->tasklet_ctx.task);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	} else if (eq->type == MLX5_EQ_TYPE_PF) {
		cancel_work_sync(&eq->pf_ctx.work);
		destroy_workqueue(eq->pf_ctx.wq);
		mempool_destroy(eq->pf_ctx.pool);
#endif
	}
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
653 | ||
daa21560 TT |
/* Return the MSI-X vector for an EQ index.
 * NOTE(review): @vecidx is ignored — the async EQ's vector is always
 * returned.  Looks intentional only if every caller passes
 * MLX5_EQ_VEC_ASYNC; verify callers before relying on (or "fixing")
 * this.
 */
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
{
	return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
}
658 | ||
e126ba97 EC |
659 | int mlx5_eq_init(struct mlx5_core_dev *dev) |
660 | { | |
661 | int err; | |
662 | ||
663 | spin_lock_init(&dev->priv.eq_table.lock); | |
664 | ||
665 | err = mlx5_eq_debugfs_init(dev); | |
666 | ||
667 | return err; | |
668 | } | |
669 | ||
670 | ||
/* Counterpart of mlx5_eq_init(): tear down the EQ debugfs entries. */
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}
675 | ||
/* Create the driver's core EQs in order: command EQ (then switch the
 * command interface to event mode), async EQ (with capability-gated
 * extra events ORed into the mask), pages EQ, and — on ODP builds with
 * the 'pg' capability — the page-fault EQ.  Errors unwind in reverse
 * creation order.  Returns 0 or a negative errno.
 */
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	/* Extend the async mask only for events the device supports. */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    MLX5_CAP_GEN(dev, vport_group_manager) &&
	    mlx5_core_is_pf(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_CAP_GEN(dev, pps))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	/* Command completions can now arrive as events instead of polling. */
	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	/* Success path returns from inside the #ifdef so that err3 is
	 * only reachable (and only compiled) on ODP builds.
	 */
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_create_map_eq(dev, &table->pfault_eq,
					 MLX5_EQ_VEC_PFAULT,
					 MLX5_NUM_ASYNC_EQE,
					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
					 "mlx5_page_fault_eq",
					 MLX5_EQ_TYPE_PF);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
				       err);
			goto err3;
		}
	}

	return err;
err3:
	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
#else
	return err;
#endif

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}
754 | ||
/* Destroy the core EQs in reverse creation order: page-fault EQ (ODP
 * builds), pages EQ, async EQ, then — after switching the command
 * interface back to polling — the command EQ.  If destroying the
 * command EQ fails, event mode is restored so commands keep working.
 * Returns the first/last failing status per the checks below.
 */
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
		if (err)
			return err;
	}
#endif

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		/* Command EQ survived; go back to event-driven commands. */
		mlx5_cmd_use_events(dev);

	return err;
}
781 | ||
/* Query EQ context/state from firmware into caller-provided @out of
 * @outlen bytes (QUERY_EQ command).  Returns the mlx5_cmd_exec()
 * status.
 */
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);