]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/scsi/bfa/bfa_hw_cb.c
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / bfa / bfa_hw_cb.c
1 /*
2 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
3 * Copyright (c) 2014- QLogic Corporation.
4 * All rights reserved
5 * www.qlogic.com
6 *
7 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License (GPL) Version 2 as
11 * published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19 #include "bfad_drv.h"
20 #include "bfa_modules.h"
21 #include "bfi_reg.h"
22
23 void
24 bfa_hwcb_reginit(struct bfa_s *bfa)
25 {
26 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
27 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
28 int fn = bfa_ioc_pcifn(&bfa->ioc);
29
30 if (fn == 0) {
31 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
32 bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
33 } else {
34 bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
35 bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
36 }
37 }
38
39 static void
40 bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
41 {
42 writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
43 bfa->iocfc.bfa_regs.intr_status);
44 }
45
46 /*
47 * Actions to respond RME Interrupt for Crossbow ASIC:
48 * - Write 1 to Interrupt Status register
49 * INTX - done in bfa_intx()
50 * MSIX - done in bfa_hwcb_rspq_ack_msix()
51 * - Update CI (only if new CI)
52 */
53 static void
54 bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
55 {
56 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
57 bfa->iocfc.bfa_regs.intr_status);
58
59 if (bfa_rspq_ci(bfa, rspq) == ci)
60 return;
61
62 bfa_rspq_ci(bfa, rspq) = ci;
63 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
64 mmiowb();
65 }
66
67 void
68 bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
69 {
70 if (bfa_rspq_ci(bfa, rspq) == ci)
71 return;
72
73 bfa_rspq_ci(bfa, rspq) = ci;
74 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
75 mmiowb();
76 }
77
78 void
79 bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
80 u32 *num_vecs, u32 *max_vec_bit)
81 {
82 #define __HFN_NUMINTS 13
83 if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
84 *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
85 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
86 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
87 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
88 __HFN_INT_MBOX_LPU0);
89 *max_vec_bit = __HFN_INT_MBOX_LPU0;
90 } else {
91 *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
92 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
93 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
94 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
95 __HFN_INT_MBOX_LPU1);
96 *max_vec_bit = __HFN_INT_MBOX_LPU1;
97 }
98
99 *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
100 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
101 *num_vecs = __HFN_NUMINTS;
102 }
103
/*
 * Dummy interrupt handler for handling spurious interrupts.
 * Installed in every handler slot by bfa_hwcb_msix_uninstall() so a
 * stray vector never invokes a stale or NULL handler.
 */
static void
bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
{
}
111
112 /*
113 * No special setup required for crossbow -- vector assignments are implicit.
114 */
115 void
116 bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
117 {
118 WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
119
120 bfa->msix.nvecs = nvecs;
121 bfa_hwcb_msix_uninstall(bfa);
122 }
123
124 void
125 bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
126 {
127 int i;
128
129 if (bfa->msix.nvecs == 0)
130 return;
131
132 if (bfa->msix.nvecs == 1) {
133 for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
134 bfa->msix.handler[i] = bfa_msix_all;
135 return;
136 }
137
138 for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
139 bfa->msix.handler[i] = bfa_msix_lpu_err;
140 }
141
142 void
143 bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
144 {
145 int i;
146
147 if (bfa->msix.nvecs == 0)
148 return;
149
150 if (bfa->msix.nvecs == 1) {
151 for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
152 bfa->msix.handler[i] = bfa_msix_all;
153 return;
154 }
155
156 for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
157 bfa->msix.handler[i] = bfa_msix_reqq;
158
159 for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
160 bfa->msix.handler[i] = bfa_msix_rspq;
161 }
162
163 void
164 bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
165 {
166 int i;
167
168 for (i = 0; i < BFI_MSIX_CB_MAX; i++)
169 bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
170 }
171
172 /*
173 * No special enable/disable -- vector assignments are implicit.
174 */
175 void
176 bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
177 {
178 if (msix) {
179 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
180 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
181 } else {
182 bfa->iocfc.hwif.hw_reqq_ack = NULL;
183 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
184 }
185 }
186
187 void
188 bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
189 {
190 *start = BFI_MSIX_RME_QMIN_CB;
191 *end = BFI_MSIX_RME_QMAX_CB;
192 }