/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"
enum {
        MTHCA_NUM_ASYNC_EQE = 0x80,
        MTHCA_NUM_CMD_EQE   = 0x80,
        MTHCA_NUM_SPARE_EQE = 0x80,
        MTHCA_EQ_ENTRY_SIZE = 0x20
};
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
        __be32 flags;
        __be64 start;
        __be32 logsize_usrpage;
        __be32 tavor_pd;        /* reserved for Arbel */
        u8     reserved1[3];
        u8     intr;
        __be32 arbel_pd;        /* lost_count for Tavor */
        __be32 lkey;
        u32    reserved2[2];
        __be32 consumer_index;
        __be32 producer_index;
        u32    reserved3[4];
} __attribute__((packed));
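
/*
 * The values below are OR'ed together to build the flags word of the
 * EQ context: status lives in bits 31:28, ownership in bit 24, the
 * translation (TR) and overrun-ignore (OI) flags in bits 18:17, and
 * the EQ state in bits 11:8.
 */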
#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)
enum {
        MTHCA_EVENT_TYPE_COMP               = 0x00,
        MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
        MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
        MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
        MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
        MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
        MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
        MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
        MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
        MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
        MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
        MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
        MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
        MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
        MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
        MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
        MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
        MTHCA_EVENT_TYPE_CMD                = 0x0a
};
#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
                                (1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
                                (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
                                (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                                (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
                                (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
                                (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)
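
/*
 * EQ doorbell commands.  The command goes in bits 31:24 of the first
 * doorbell word, OR'ed with the EQ number; the second word carries
 * the argument (the new consumer index for SET_CI, the CQ number for
 * DISARM_CQ, and so on).
 */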
#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
struct mthca_eqe {
        u8 reserved1;
        u8 type;
        u8 reserved2;
        u8 subtype;
        union {
                u32 raw[6];
                struct {
                        __be32 cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16    reserved1;
                        __be16 token;
                        u32    reserved2;
                        u8     reserved3[3];
                        u8     status;
                        __be64 out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32 qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32 srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32 cqn;
                        u32    reserved1;
                        u8     reserved2[3];
                        u8     syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32    reserved1[2];
                        __be32 port;
                } __attribute__((packed)) port_change;
        } event;
        u8 reserved3[3];
        u8 owner;
} __attribute__((packed));
#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
static inline u64 async_mask(struct mthca_dev *dev)
{
        return dev->mthca_flags & MTHCA_FLAG_SRQ ?
                MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
                MTHCA_ASYNC_EVENT_MASK;
}
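
/*
 * Tavor-style EQ consumer index update: post a SET_CI doorbell for
 * this EQ, with the index masked to the power-of-two ring size.
 */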
static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        __be32 doorbell[2];

        doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
        doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));

        /*
         * This barrier makes sure that all updates to ownership bits
         * done by set_eqe_hw() hit memory before the consumer index
         * is updated.  set_eq_ci() allows the HCA to possibly write
         * more EQ entries, and we want to avoid the exceedingly
         * unlikely possibility of the HCA writing an entry and then
         * having set_eqe_hw() overwrite the owner field.
         */
        wmb();
        mthca_write64(doorbell,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        /* See comment in tavor_set_eq_ci() above. */
        wmb();
        __raw_writel((__force u32) cpu_to_be32(ci),
                     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}
static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        if (mthca_is_memfree(dev))
                arbel_set_eq_ci(dev, eq, ci);
        else
                tavor_set_eq_ci(dev, eq, ci);
}
static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
        __be32 doorbell[2];

        doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
        doorbell[1] = 0;

        mthca_write64(doorbell,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
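
/*
 * On mem-free (Arbel) HCAs, EQs are rearmed by writing a byte-swapped
 * mask of EQ numbers to the EQ arm register rather than by ringing a
 * doorbell per EQ; eqn_mask is set up as swab32(1 << eqn) in
 * mthca_create_eq().
 */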
static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
        writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
        if (!mthca_is_memfree(dev)) {
                __be32 doorbell[2];

                doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
                doorbell[1] = cpu_to_be32(cqn);

                mthca_write64(doorbell,
                              dev->kar + MTHCA_EQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}
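
/*
 * The EQ ring is kept in a list of separately allocated pages;
 * get_eqe() turns a (wrapped) entry index into a pointer by indexing
 * the page list and then offsetting within the page.
 */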
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        eqe = get_eqe(eq, eq->cons_index);
        return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}
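
/*
 * Ownership of each EQE alternates between hardware and software via
 * the high bit of the owner byte: the HCA only writes an event into
 * an entry it owns and clears the bit when it does so, and
 * set_eqe_hw() below sets the bit again to return the entry to the
 * hardware once software has consumed it.
 */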
static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
        eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}
static void port_change(struct mthca_dev *dev, int port, int active)
{
        struct ib_event record;

        mthca_dbg(dev, "Port change to %s for port %d\n",
                  active ? "active" : "down", port);

        record.device = &dev->ib_dev;
        record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
        record.element.port_num = port;

        ib_dispatch_event(&record);
}
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        int disarm_cqn;
        int eqes_found = 0;
        int set_ci = 0;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MTHCA_EVENT_TYPE_COMP:
                        disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        disarm_cq(dev, eq->eqn, disarm_cqn);
                        mthca_cq_completion(dev, disarm_cqn);
                        break;
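
                /*
                 * CQ, QP and SRQ numbers occupy the low 24 bits of
                 * their EQE word, hence the 0xffffff mask in each of
                 * the cases below.
                 */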
                case MTHCA_EVENT_TYPE_PATH_MIG:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG);
                        break;

                case MTHCA_EVENT_TYPE_COMM_EST:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_COMM_EST);
                        break;

                case MTHCA_EVENT_TYPE_SQ_DRAINED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_SQ_DRAINED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_LAST_WQE_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_LIMIT:
                        mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                        IB_EVENT_SRQ_LIMIT_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_FATAL);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_REQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_ACCESS_ERR);
                        break;

                case MTHCA_EVENT_TYPE_CMD:
                        mthca_cmd_event(dev,
                                        be16_to_cpu(eqe->event.cmd.token),
                                        eqe->event.cmd.status,
                                        be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MTHCA_EVENT_TYPE_PORT_CHANGE:
                        port_change(dev,
                                    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
                                    eqe->subtype == 0x4);
                        break;

                case MTHCA_EVENT_TYPE_CQ_ERROR:
                        mthca_warn(dev, "CQ %s on CQN %06x\n",
                                   eqe->event.cq_err.syndrome == 1 ?
                                   "overrun" : "access violation",
                                   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
                                       IB_EVENT_CQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
                        mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_ECC_DETECT:
                default:
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
                }

                set_eqe_hw(eqe);
                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MTHCA_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
                        /*
                         * Conditional on hca_type is OK here because
                         * this is a rare case, not the fast path.
                         */
                        set_eq_ci(dev, eq, eq->cons_index);
                        set_ci = 0;
                }
        }

        /*
         * Rely on caller to set consumer index so that we don't have
         * to test hca_type in our interrupt handling fast path.
         */
        return eqes_found;
}
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
        struct mthca_dev *dev = dev_ptr;
        u32 ecr;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
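
        /*
         * Read the event cause register to see which EQs have pending
         * events, then write the same bits back to clear them.
         */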
        ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
        if (!ecr)
                return IRQ_NONE;

        writel(ecr, dev->eq_regs.tavor.ecr_base +
               MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (ecr & dev->eq_table.eq[i].eqn_mask) {
                        if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
                                tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
                                                dev->eq_table.eq[i].cons_index);
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
                }

        return IRQ_HANDLED;
}
static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
                                               struct pt_regs *regs)
{
        struct mthca_eq  *eq  = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        tavor_set_eq_ci(dev, eq, eq->cons_index);
        tavor_eq_req_not(dev, eq->eqn);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}
static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
        struct mthca_dev *dev = dev_ptr;
        int work = 0;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
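
        /*
         * There is no ECR-style summary register to consult here, so
         * poll every EQ and note whether any of them had events.
         */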
        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
                        work = 1;
                        arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
                                        dev->eq_table.eq[i].cons_index);
                }

        arbel_eq_req_not(dev, dev->eq_table.arm_mask);

        return IRQ_RETVAL(work);
}
static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
                                               struct pt_regs *regs)
{
        struct mthca_eq  *eq  = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        arbel_set_eq_ci(dev, eq, eq->cons_index);
        arbel_eq_req_not(dev, eq->eqn_mask);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}
static int __devinit mthca_create_eq(struct mthca_dev *dev,
                                     int nent,
                                     u8 intr,
                                     struct mthca_eq *eq)
{
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        struct mthca_mailbox *mailbox;
        struct mthca_eq_context *eq_context;
        int err = -ENOMEM;
        int i;
        u8 status;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                pci_unmap_addr_set(&eq->page_list[i], mapping, t);

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }
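
        /*
         * Mark every entry as hardware-owned before the EQ is handed
         * to the HCA, so that no entry is treated as a valid event
         * until the hardware has actually written it.
         */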
        for (i = 0; i < eq->nent; ++i)
                set_eqe_hw(get_eqe(eq, i));

        eq->eqn = mthca_alloc(&dev->eq_table.alloc);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                  dma_list, PAGE_SHIFT, npages,
                                  0, npages * PAGE_SIZE,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &eq->mr);
        if (err)
                goto err_out_free_eq;
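
        /*
         * Program the EQ context: log2 of the queue size goes in the
         * top byte of logsize_usrpage.  Tavor also needs the driver
         * UAR index in the low bits and the PD in tavor_pd, while
         * mem-free HCAs take the PD in arbel_pd instead.
         */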
        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
                                                  MTHCA_EQ_OWNER_HW    |
                                                  MTHCA_EQ_STATE_ARMED |
                                                  MTHCA_EQ_FLAG_TR);
        if (mthca_is_memfree(dev))
                eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

        eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
        if (mthca_is_memfree(dev)) {
                eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
        } else {
                eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
                eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
        }
        eq_context->intr            = intr;
        eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

        err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mr;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        kfree(dma_list);
        mthca_free_mailbox(dev, mailbox);

        eq->eqn_mask   = swab32(1 << eq->eqn);
        eq->cons_index = 0;

        dev->eq_table.arm_mask |= eq->eqn_mask;

        mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
                  eq->eqn, eq->nent);

        return err;

 err_out_free_mr:
        mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
        mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          pci_unmap_addr(&eq->page_list[i],
                                                         mapping));

        mthca_free_mailbox(dev, mailbox);

 err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

 err_out:
        return err;
}
static void mthca_free_eq(struct mthca_dev *dev,
                          struct mthca_eq *eq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;
        int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                PAGE_SIZE;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return;

        err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
        if (status)
                mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

        dev->eq_table.arm_mask &= ~eq->eqn_mask;
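
        /*
         * The block below is a compiled-out debugging aid: flip the
         * condition to dump the EQ context that HW2SW_EQ returned.
         */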
        if (0) {
                mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mthca_free_mr(dev, &eq->mr);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    pci_unmap_addr(&eq->page_list[i], mapping));

        kfree(eq->page_list);
        mthca_free_mailbox(dev, mailbox);
}
static void mthca_free_irqs(struct mthca_dev *dev)
{
        int i;

        if (dev->eq_table.have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (dev->eq_table.eq[i].have_irq)
                        free_irq(dev->eq_table.eq[i].msi_x_vector,
                                 dev->eq_table.eq + i);
}
static int __devinit mthca_map_reg(struct mthca_dev *dev,
                                   unsigned long offset, unsigned long size,
                                   void __iomem **map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        if (!request_mem_region(base + offset, size, DRV_NAME))
                return -EBUSY;

        *map = ioremap(base + offset, size);
        if (!*map) {
                release_mem_region(base + offset, size);
                return -ENOMEM;
        }

        return 0;
}
static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
                            unsigned long size, void __iomem *map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        release_mem_region(base + offset, size);
        iounmap(map);
}
static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                /*
                 * We assume that the EQ arm and EQ set CI registers
                 * fall within the first BAR.  We can't trust the
                 * values firmware gives us, since those addresses are
                 * valid on the HCA's side of the PCI bus but not
                 * necessarily the host side.
                 */
                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                /*
                 * Add 4 because we limit ourselves to EQs 0 ... 31,
                 * so we only need the low word of the register.
                 */
                if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.eq_arm_base) + 4, 4,
                                  &dev->eq_regs.arbel.eq_arm)) {
                        mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.eq_set_ci_base,
                                  MTHCA_EQ_SET_CI_SIZE,
                                  &dev->eq_regs.arbel.eq_set_ci_base)) {
                        mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
                        mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                              dev->fw.arbel.eq_arm_base) + 4, 4,
                                        dev->eq_regs.arbel.eq_arm);
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        } else {
                if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, MTHCA_ECR_BASE,
                                  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                  &dev->eq_regs.tavor.ecr_base)) {
                        mthca_err(dev, "Couldn't map ecr register, "
                                  "aborting.\n");
                        mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        }

        return 0;
}
static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.eq_set_ci_base,
                                MTHCA_EQ_SET_CI_SIZE,
                                dev->eq_regs.arbel.eq_set_ci_base);
                mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                      dev->fw.arbel.eq_arm_base) + 4, 4,
                                dev->eq_regs.arbel.eq_arm);
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        } else {
                mthca_unmap_reg(dev, MTHCA_ECR_BASE,
                                MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                dev->eq_regs.tavor.ecr_base);
                mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        }
}
int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
        int ret;
        u8 status;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 32 bytes of context
         * memory, or 1 KB total.
         */
        dev->eq_table.icm_virt = icm_virt;
        dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!dev->eq_table.icm_page)
                return -ENOMEM;
        dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(dev->eq_table.icm_page);
        }

        return ret;
}
void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
        u8 status;

        mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
        pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(dev->eq_table.icm_page);
}
int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
        int err;
        u8 status;
        u8 intr;
        int i;

        err = mthca_alloc_init(&dev->eq_table.alloc,
                               dev->limits.num_eqs,
                               dev->limits.num_eqs - 1,
                               dev->limits.reserved_eqs);
        if (err)
                return err;

        err = mthca_map_eq_regs(dev);
        if (err)
                goto err_out_free;

        if (dev->mthca_flags & MTHCA_FLAG_MSI ||
            dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                dev->eq_table.clr_mask = 0;
        } else {
                dev->eq_table.clr_mask =
                        swab32(1 << (dev->eq_table.inta_pin & 31));
                dev->eq_table.clr_int  = dev->clr_base +
                        (dev->eq_table.inta_pin < 32 ? 4 : 0);
        }

        dev->eq_table.arm_mask = 0;
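
        /*
         * Pick the interrupt vector number given to the HCA for each
         * EQ: the INTA pin for legacy interrupts, or consecutive
         * vectors starting at 128 for MSI/MSI-X (one vector per EQ
         * when MSI-X is available).
         */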
        intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
                128 : dev->eq_table.inta_pin;

        err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_COMP]);
        if (err)
                goto err_out_unmap;

        err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
        if (err)
                goto err_out_comp;

        err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_CMD]);
        if (err)
                goto err_out_async;

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                static const char *eq_name[] = {
                        [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
                        [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
                        [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
                };

                for (i = 0; i < MTHCA_NUM_EQ; ++i) {
                        err = request_irq(dev->eq_table.eq[i].msi_x_vector,
                                          mthca_is_memfree(dev) ?
                                          mthca_arbel_msi_x_interrupt :
                                          mthca_tavor_msi_x_interrupt,
                                          0, eq_name[i], dev->eq_table.eq + i);
                        if (err)
                                goto err_out_cmd;
                        dev->eq_table.eq[i].have_irq = 1;
                }
        } else {
                err = request_irq(dev->pdev->irq,
                                  mthca_is_memfree(dev) ?
                                  mthca_arbel_interrupt :
                                  mthca_tavor_interrupt,
                                  SA_SHIRQ, DRV_NAME, dev);
                if (err)
                        goto err_out_cmd;
                dev->eq_table.have_irq = 1;
        }

        err = mthca_MAP_EQ(dev, async_mask(dev),
                           0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

        err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                           0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_is_memfree(dev))
                        arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
                else
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

        return 0;

 err_out_cmd:
        mthca_free_irqs(dev);
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

 err_out_async:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

 err_out_comp:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

 err_out_unmap:
        mthca_unmap_eq_regs(dev);

 err_out_free:
        mthca_alloc_cleanup(&dev->eq_table.alloc);
        return err;
}
void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
        u8 status;
        int i;

        mthca_free_irqs(dev);

        mthca_MAP_EQ(dev, async_mask(dev),
                     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                mthca_free_eq(dev, &dev->eq_table.eq[i]);

        mthca_unmap_eq_regs(dev);

        mthca_alloc_cleanup(&dev->eq_table.alloc);
}