]>
Commit | Line | Data |
---|---|---|
5e3dd157 KV |
1 | /* |
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | |
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | |
4 | * | |
5 | * Permission to use, copy, modify, and/or distribute this software for any | |
6 | * purpose with or without fee is hereby granted, provided that the above | |
7 | * copyright notice and this permission notice appear in all copies. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
16 | */ | |
17 | ||
18 | #include <linux/pci.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/interrupt.h> | |
21 | #include <linux/spinlock.h> | |
650b91fb | 22 | #include <linux/bitops.h> |
5e3dd157 KV |
23 | |
24 | #include "core.h" | |
25 | #include "debug.h" | |
26 | ||
27 | #include "targaddrs.h" | |
28 | #include "bmi.h" | |
29 | ||
30 | #include "hif.h" | |
31 | #include "htc.h" | |
32 | ||
33 | #include "ce.h" | |
34 | #include "pci.h" | |
35 | ||
cfe9c45b MK |
/* Interrupt delivery modes selectable via the irq_mode module parameter;
 * AUTO lets the driver pick a supported mode itself.
 */
enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};
41 | ||
/* Chip reset strategies selectable via the reset_mode module parameter. */
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};
46 | ||
/* Module parameters controlling interrupt and reset behavior. */
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
0399eca8 | 59 | |
/* PCI device IDs claimed by this driver; the list is NULL-terminated. */
static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{0}
};
68 | ||
/* Exact (device id, chip revision) pairs the driver supports; anything
 * not listed here is rejected at probe time.
 */
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
};
93 | ||
/* Forward declarations for routines referenced by the copy engine
 * configuration tables and setup code below.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
5e3dd157 | 112 | |
/* Host-side Copy Engine configuration: per-pipe ring sizes, maximum
 * transfer sizes and send/receive completion callbacks.
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
217 | ||
/* Target firmware's Copy Engine configuration. All fields are stored
 * little-endian because the table is sent to the target at startup.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 target->host packtlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It not necessary to send target wlan configuration for CE10 & CE11
	 * as these CEs are not actively used in target.
	 */
};
326 | ||
d7bfb7aa MK |
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 * Each entry is (service id, pipe direction, pipe number), little-endian.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
422 | ||
77258d40 MK |
423 | static bool ath10k_pci_is_awake(struct ath10k *ar) |
424 | { | |
425 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
426 | u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
427 | RTC_STATE_ADDRESS); | |
428 | ||
429 | return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; | |
430 | } | |
431 | ||
432 | static void __ath10k_pci_wake(struct ath10k *ar) | |
433 | { | |
434 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
435 | ||
436 | lockdep_assert_held(&ar_pci->ps_lock); | |
437 | ||
438 | ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", | |
439 | ar_pci->ps_wake_refcount, ar_pci->ps_awake); | |
440 | ||
441 | iowrite32(PCIE_SOC_WAKE_V_MASK, | |
442 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
443 | PCIE_SOC_WAKE_ADDRESS); | |
444 | } | |
445 | ||
446 | static void __ath10k_pci_sleep(struct ath10k *ar) | |
447 | { | |
448 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
449 | ||
450 | lockdep_assert_held(&ar_pci->ps_lock); | |
451 | ||
452 | ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n", | |
453 | ar_pci->ps_wake_refcount, ar_pci->ps_awake); | |
454 | ||
455 | iowrite32(PCIE_SOC_WAKE_RESET, | |
456 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
457 | PCIE_SOC_WAKE_ADDRESS); | |
458 | ar_pci->ps_awake = false; | |
459 | } | |
460 | ||
461 | static int ath10k_pci_wake_wait(struct ath10k *ar) | |
462 | { | |
463 | int tot_delay = 0; | |
464 | int curr_delay = 5; | |
465 | ||
466 | while (tot_delay < PCIE_WAKE_TIMEOUT) { | |
39b91b81 MK |
467 | if (ath10k_pci_is_awake(ar)) { |
468 | if (tot_delay > PCIE_WAKE_LATE_US) | |
469 | ath10k_warn(ar, "device wakeup took %d ms which is unusally long, otherwise it works normally.\n", | |
470 | tot_delay / 1000); | |
77258d40 | 471 | return 0; |
39b91b81 | 472 | } |
77258d40 MK |
473 | |
474 | udelay(curr_delay); | |
475 | tot_delay += curr_delay; | |
476 | ||
477 | if (curr_delay < 50) | |
478 | curr_delay += 5; | |
479 | } | |
480 | ||
481 | return -ETIMEDOUT; | |
482 | } | |
483 | ||
1aaf8efb AK |
/* Wake the device regardless of the wake refcount.
 *
 * Returns 0 on success (cached awake state updated), or the error from
 * ath10k_pci_wake_wait() if the device failed to wake in time.
 */
static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	/* Skip the register write if the cached state says we are awake. */
	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
506 | ||
507 | static void ath10k_pci_force_sleep(struct ath10k *ar) | |
508 | { | |
509 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
510 | unsigned long flags; | |
511 | ||
512 | spin_lock_irqsave(&ar_pci->ps_lock, flags); | |
513 | ||
514 | iowrite32(PCIE_SOC_WAKE_RESET, | |
515 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
516 | PCIE_SOC_WAKE_ADDRESS); | |
517 | ar_pci->ps_awake = false; | |
518 | ||
519 | spin_unlock_irqrestore(&ar_pci->ps_lock, flags); | |
520 | } | |
521 | ||
77258d40 MK |
/* Take a wake reference on the device, waking it first if needed.
 *
 * Returns 0 on success with the reference taken, or the error from
 * ath10k_pci_wake_wait() (no reference taken). A no-op returning 0
 * when power save support is disabled (pci_ps == 0).
 */
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		/* Refcount wrapping to zero would mean an unbalanced caller. */
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
556 | ||
/* Drop a wake reference. The actual sleep transition is deferred via
 * ps_timer so bursts of register accesses don't bounce the device in
 * and out of sleep. A no-op when power save support is disabled.
 */
static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* An unbalanced sleep would underflow the refcount; warn and bail. */
	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
581 | ||
582 | static void ath10k_pci_ps_timer(unsigned long ptr) | |
583 | { | |
584 | struct ath10k *ar = (void *)ptr; | |
585 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
586 | unsigned long flags; | |
587 | ||
588 | spin_lock_irqsave(&ar_pci->ps_lock, flags); | |
589 | ||
590 | ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", | |
591 | ar_pci->ps_wake_refcount, ar_pci->ps_awake); | |
592 | ||
593 | if (ar_pci->ps_wake_refcount > 0) | |
594 | goto skip; | |
595 | ||
596 | __ath10k_pci_sleep(ar); | |
597 | ||
598 | skip: | |
599 | spin_unlock_irqrestore(&ar_pci->ps_lock, flags); | |
600 | } | |
601 | ||
/* Put the device to sleep synchronously: cancel the deferred-sleep
 * timer and perform the transition now. With power save disabled the
 * forced variant is used instead.
 */
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	/* Callers are expected to have dropped all wake references. */
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
619 | ||
/* Write a 32-bit value to the device at @offset within the mapped BAR,
 * taking and releasing a wake reference around the access. Out-of-bounds
 * offsets and wake failures are logged and the write is dropped.
 */
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Reject accesses past the end of the mapped region. */
	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}
641 | ||
/* Read a 32-bit value from the device at @offset within the mapped BAR,
 * taking and releasing a wake reference around the access.
 *
 * Returns 0 for out-of-bounds offsets and 0xffffffff if the device
 * could not be woken; both conditions are logged.
 */
u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	/* Reject accesses past the end of the mapped region. */
	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}
666 | ||
/* Accessors addressing registers relative to the RTC SOC and PCIe
 * local register windows, built on the bounds-checked read32/write32.
 */
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}
686 | ||
e539887b MK |
687 | static bool ath10k_pci_irq_pending(struct ath10k *ar) |
688 | { | |
689 | u32 cause; | |
690 | ||
691 | /* Check if the shared legacy irq is for us */ | |
692 | cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + | |
693 | PCIE_INTR_CAUSE_ADDRESS); | |
694 | if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) | |
695 | return true; | |
696 | ||
697 | return false; | |
698 | } | |
699 | ||
2685218b MK |
/* Mask and acknowledge all legacy interrupt sources (firmware + CEs). */
static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
715 | ||
/* Unmask the firmware and copy engine legacy interrupt sources. */
static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
727 | ||
403d627b | 728 | static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) |
ab977bd0 | 729 | { |
ab977bd0 MK |
730 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
731 | ||
403d627b MK |
732 | if (ar_pci->num_msi_intrs > 1) |
733 | return "msi-x"; | |
d8bb26b9 KV |
734 | |
735 | if (ar_pci->num_msi_intrs == 1) | |
403d627b | 736 | return "msi"; |
d8bb26b9 KV |
737 | |
738 | return "legacy"; | |
ab977bd0 MK |
739 | } |
740 | ||
728f95ee | 741 | static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) |
ab977bd0 | 742 | { |
728f95ee | 743 | struct ath10k *ar = pipe->hif_ce_state; |
ab977bd0 | 744 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
728f95ee MK |
745 | struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; |
746 | struct sk_buff *skb; | |
747 | dma_addr_t paddr; | |
ab977bd0 MK |
748 | int ret; |
749 | ||
728f95ee MK |
750 | skb = dev_alloc_skb(pipe->buf_sz); |
751 | if (!skb) | |
752 | return -ENOMEM; | |
753 | ||
754 | WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); | |
755 | ||
756 | paddr = dma_map_single(ar->dev, skb->data, | |
757 | skb->len + skb_tailroom(skb), | |
758 | DMA_FROM_DEVICE); | |
759 | if (unlikely(dma_mapping_error(ar->dev, paddr))) { | |
7aa7a72a | 760 | ath10k_warn(ar, "failed to dma map pci rx buf\n"); |
728f95ee MK |
761 | dev_kfree_skb_any(skb); |
762 | return -EIO; | |
763 | } | |
764 | ||
8582bf3b | 765 | ATH10K_SKB_RXCB(skb)->paddr = paddr; |
728f95ee | 766 | |
ab4e3db0 | 767 | spin_lock_bh(&ar_pci->ce_lock); |
728f95ee | 768 | ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); |
ab4e3db0 | 769 | spin_unlock_bh(&ar_pci->ce_lock); |
ab977bd0 | 770 | if (ret) { |
728f95ee MK |
771 | dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), |
772 | DMA_FROM_DEVICE); | |
773 | dev_kfree_skb_any(skb); | |
ab977bd0 MK |
774 | return ret; |
775 | } | |
776 | ||
777 | return 0; | |
778 | } | |
779 | ||
ab4e3db0 | 780 | static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) |
ab977bd0 | 781 | { |
728f95ee MK |
782 | struct ath10k *ar = pipe->hif_ce_state; |
783 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
784 | struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; | |
785 | int ret, num; | |
786 | ||
728f95ee MK |
787 | if (pipe->buf_sz == 0) |
788 | return; | |
789 | ||
790 | if (!ce_pipe->dest_ring) | |
791 | return; | |
792 | ||
ab4e3db0 | 793 | spin_lock_bh(&ar_pci->ce_lock); |
728f95ee | 794 | num = __ath10k_ce_rx_num_free_bufs(ce_pipe); |
ab4e3db0 | 795 | spin_unlock_bh(&ar_pci->ce_lock); |
728f95ee MK |
796 | while (num--) { |
797 | ret = __ath10k_pci_rx_post_buf(pipe); | |
798 | if (ret) { | |
ab4e3db0 RM |
799 | if (ret == -ENOSPC) |
800 | break; | |
7aa7a72a | 801 | ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); |
728f95ee MK |
802 | mod_timer(&ar_pci->rx_post_retry, jiffies + |
803 | ATH10K_PCI_RX_POST_RETRY_MS); | |
804 | break; | |
805 | } | |
806 | } | |
807 | } | |
808 | ||
728f95ee MK |
809 | static void ath10k_pci_rx_post(struct ath10k *ar) |
810 | { | |
811 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
812 | int i; | |
813 | ||
728f95ee | 814 | for (i = 0; i < CE_COUNT; i++) |
ab4e3db0 | 815 | ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); |
728f95ee MK |
816 | } |
817 | ||
/* rx_post_retry timer callback: retry posting RX buffers after an
 * earlier failure.
 */
static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}
824 | ||
418ca599 VT |
/* Convert a target CPU virtual address into an address usable by the
 * diagnostic copy engine. The upper-bits translation is hardware
 * revision dependent; the low 20 bits of @addr are kept as the offset.
 */
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					 CORE_CTRL_ADDRESS) &
		       0x7ff) << 21;
		break;
	case ATH10K_HW_QCA99X0:
		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
		break;
	}

	val |= 0x100000 | (addr & 0xfffff);
	return val;
}
845 | ||
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/* ce_lock serializes diag access against the normal CE paths */
	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	/* Transfer in DIAG_TRANSFER_LIMIT sized chunks */
	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		/* NOTE(review): the conversion is re-applied on every loop
		 * iteration to the already-converted, incremented address;
		 * it appears idempotent only while the transfer stays inside
		 * one 1 MB window — TODO confirm against hardware docs.
		 */
		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		/* Busy-poll (up to DIAG_ACCESS_CE_TIMEOUT_MS) for the send
		 * completion to be reaped from the diag CE.
		 */
		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Likewise busy-poll for the receive completion */
		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Sanity: completion must match what was requested */
		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
967 | ||
3d29a3e0 KV |
968 | static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) |
969 | { | |
0fdc14e4 MK |
970 | __le32 val = 0; |
971 | int ret; | |
972 | ||
973 | ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); | |
974 | *value = __le32_to_cpu(val); | |
975 | ||
976 | return ret; | |
3d29a3e0 KV |
977 | } |
978 | ||
/* Read @len bytes of firmware memory that a host-interest (HI) item points
 * at: first fetch the pointer stored at HI slot @src, then read the memory
 * it references into @dest.
 */
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	/* dereference the HI slot to get the target-side buffer address */
	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

/* Convenience wrapper: callers name the HI item, HI_ITEM() computes the
 * slot offset.
 */
#define ath10k_pci_diag_read_hi(ar, dest, src, len)	\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
5e3dd157 KV |
/* Write @nbytes of caller data to target memory at Target-CPU virtual
 * @address using the diagnostic CE. Mirror image of
 * ath10k_pci_diag_read_mem(): data is bounced through a DMA-coherent
 * buffer and moved in DIAG_TRANSFER_LIMIT chunks.
 */
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data; /* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	/* serialize diag access against normal CE processing */
	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		/* Busy-poll (bounded by DIAG_ACCESS_CE_TIMEOUT_MS) until the
		 * send completion is reaped.
		 */
		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Likewise poll for the target-side receive completion */
		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Sanity: completion must match the request */
		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
1129 | ||
0fdc14e4 MK |
1130 | static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) |
1131 | { | |
1132 | __le32 val = __cpu_to_le32(value); | |
1133 | ||
1134 | return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); | |
1135 | } | |
1136 | ||
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* Drain the CE first, then run the (potentially heavier) HTC
	 * completion handlers outside of the CE reaping loop.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}
1156 | ||
/* Common RX reaping path: pull completed buffers off @ce_state, unmap
 * them, hand each skb to @callback, then replenish the pipe's RX ring.
 */
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		/* unmap before touching skb->data */
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* firmware reported more data than the buffer can hold:
		 * drop rather than overflow
		 */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	/* deliver outside the CE reaping loop */
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}
1203 | ||
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
1209 | ||
/* HTC RX callback variant that also services CE4 first. */
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
1219 | ||
/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}
1228 | ||
/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		/* unmap the TX DMA mapping set up at send time, then let
		 * HTT finish the completion
		 */
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}
1245 | ||
/* Strip the HTC header and hand the payload to the HTT T2H handler. */
static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
1251 | ||
/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
1262 | ||
/* Queue a scatter-gather list of @n_items on the CE for @pipe_id. All
 * items but the last are sent with CE_SEND_FLAG_GATHER; the final send
 * (flags 0) commits the whole burst. On any failure the already-queued
 * entries are reverted so the ring stays consistent.
 *
 * Returns 0 on success, -ENOBUFS if the ring lacks space, or the CE
 * send error.
 */
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* make sure the whole burst fits before queuing anything */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	/* last item carries no GATHER flag, which triggers the transfer */
	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	/* roll back every entry queued so far in this call */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}
1331 | ||
/* HIF-facing wrapper around the diagnostic memory read. */
static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				    size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
1337 | ||
/* Report how many source-ring entries are free on @pipe's CE. */
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
1346 | ||
/* Fetch the firmware crash register dump (hi_failure_state) from the
 * target, log it, and copy it into @crash_data (if provided) for later
 * retrieval via debugfs. Caller must hold ar->data_lock.
 */
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	/* printed four-per-line below, so the count must divide evenly */
	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	/* stored as-is (little-endian) in the crash record */
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}
1380 | ||
/* Handle a firmware crash: bump counters, log driver info and the
 * register dump, then schedule a device restart.
 */
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	/* may return NULL; the dump below tolerates that */
	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	/* recovery happens asynchronously in the restart worker */
	queue_work(ar->workqueue, &ar->restart_work);
}
1405 | ||
/* Poll @pipe for send completions. When @force is zero the (relatively
 * expensive) CE register read is skipped while plenty of ring space
 * remains.
 */
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
1431 | ||
/* Stop all deferred-work sources: interrupt tasklets, per-pipe tasklets
 * and the RX replenish retry timer.
 */
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* timer may re-post RX buffers; make sure it is gone too */
	del_timer_sync(&ar_pci->rx_post_retry);
}
1445 | ||
/* Look up the UL (host->target) and DL (target->host) pipe numbers for
 * @service_id in the static service-to-CE map. Returns -ENOENT (with a
 * WARN) if the service has no complete mapping.
 */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
					      u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		/* map entries are stored little-endian (shared with fw) */
		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			/* target -> host */
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			/* host -> target */
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}
1490 | ||
/* Return the control-service (RSVD_CTRL) pipe pair as the defaults. */
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	/* the RSVD_CTRL mapping is static, so the lookup cannot fail */
	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
1500 | ||
/* Mask the firmware (crash) indication interrupt at the target side. */
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		/* clear the PCIE_REG_31 bit in CORE_CTRL to mask */
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
		/* TODO: Find appropriate register configuration for QCA99X0
		 *  to mask irq/MSI.
		 */
		break;
	}
}
1522 | ||
/* Unmask the firmware (crash) indication interrupt at the target side.
 * Inverse of ath10k_pci_irq_msi_fw_mask().
 */
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		/* set the PCIE_REG_31 bit in CORE_CTRL to unmask */
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
		/* TODO: Find appropriate register configuration for QCA99X0
		 *  to unmask irq/MSI.
		 */
		break;
	}
}
5e3dd157 | 1544 | |
7c0f0e3c MK |
/* Disable all interrupt sources: CE, legacy PCI, and fw indications. */
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}
1551 | ||
/* Wait until every in-flight IRQ handler for this device has finished.
 * num_msi_intrs == 0 means legacy interrupts, hence the max(1, ...).
 */
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		synchronize_irq(ar_pci->pdev->irq + i);
}
1560 | ||
/* Enable all interrupt sources; inverse of ath10k_pci_irq_disable(). */
static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}
1567 | ||
/* HIF start: enable interrupts, prime the RX rings and restore the PCIe
 * link control value saved earlier (ASPM handling).
 */
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	/* restore link_ctl captured at probe/power-up time */
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}
1582 | ||
/* Free every RX skb still posted on @pci_pipe's destination ring,
 * unmapping its DMA mapping first.
 */
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	/* pipe may have no RX direction at all */
	if (!ce_ring)
		return;

	/* buf_sz == 0 means no buffers were ever posted */
	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
1614 | ||
/* Complete (and thereby free) every TX skb still queued on @pci_pipe's
 * source ring by running the normal HTC TX completion for each.
 */
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	/* pipe may have no TX direction at all */
	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
1645 | ||
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}
1667 | ||
1668 | static void ath10k_pci_ce_deinit(struct ath10k *ar) | |
1669 | { | |
25d0dbcb | 1670 | int i; |
5e3dd157 | 1671 | |
25d0dbcb MK |
1672 | for (i = 0; i < CE_COUNT; i++) |
1673 | ath10k_ce_deinit_pipe(ar, i); | |
5e3dd157 KV |
1674 | } |
1675 | ||
/* Quiesce deferred work and release all residual ring buffers. */
static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}
5e3dd157 | 1681 | |
5e3dd157 KV |
/* HIF stop: reset the chip first (see comment below), then disable and
 * synchronize interrupts and flush all queued work/buffers. Also sanity
 * checks that no power-save wake references remain held.
 */
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possible corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);

	/* all wake refs should have been dropped by now */
	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
1710 | ||
1711 | static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, | |
1712 | void *req, u32 req_len, | |
1713 | void *resp, u32 *resp_len) | |
1714 | { | |
1715 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2aa39115 MK |
1716 | struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; |
1717 | struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; | |
1718 | struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; | |
1719 | struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; | |
5e3dd157 KV |
1720 | dma_addr_t req_paddr = 0; |
1721 | dma_addr_t resp_paddr = 0; | |
1722 | struct bmi_xfer xfer = {}; | |
1723 | void *treq, *tresp = NULL; | |
1724 | int ret = 0; | |
1725 | ||
85622cde MK |
1726 | might_sleep(); |
1727 | ||
5e3dd157 KV |
1728 | if (resp && !resp_len) |
1729 | return -EINVAL; | |
1730 | ||
1731 | if (resp && resp_len && *resp_len == 0) | |
1732 | return -EINVAL; | |
1733 | ||
1734 | treq = kmemdup(req, req_len, GFP_KERNEL); | |
1735 | if (!treq) | |
1736 | return -ENOMEM; | |
1737 | ||
1738 | req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); | |
1739 | ret = dma_mapping_error(ar->dev, req_paddr); | |
5e55e3cb MK |
1740 | if (ret) { |
1741 | ret = -EIO; | |
5e3dd157 | 1742 | goto err_dma; |
5e55e3cb | 1743 | } |
5e3dd157 KV |
1744 | |
1745 | if (resp && resp_len) { | |
1746 | tresp = kzalloc(*resp_len, GFP_KERNEL); | |
1747 | if (!tresp) { | |
1748 | ret = -ENOMEM; | |
1749 | goto err_req; | |
1750 | } | |
1751 | ||
1752 | resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, | |
1753 | DMA_FROM_DEVICE); | |
1754 | ret = dma_mapping_error(ar->dev, resp_paddr); | |
5e55e3cb MK |
1755 | if (ret) { |
1756 | ret = EIO; | |
5e3dd157 | 1757 | goto err_req; |
5e55e3cb | 1758 | } |
5e3dd157 KV |
1759 | |
1760 | xfer.wait_for_resp = true; | |
1761 | xfer.resp_len = 0; | |
1762 | ||
728f95ee | 1763 | ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); |
5e3dd157 KV |
1764 | } |
1765 | ||
5e3dd157 KV |
1766 | ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); |
1767 | if (ret) | |
1768 | goto err_resp; | |
1769 | ||
85622cde MK |
1770 | ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer); |
1771 | if (ret) { | |
5e3dd157 KV |
1772 | u32 unused_buffer; |
1773 | unsigned int unused_nbytes; | |
1774 | unsigned int unused_id; | |
1775 | ||
5e3dd157 KV |
1776 | ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, |
1777 | &unused_nbytes, &unused_id); | |
1778 | } else { | |
1779 | /* non-zero means we did not time out */ | |
1780 | ret = 0; | |
1781 | } | |
1782 | ||
1783 | err_resp: | |
1784 | if (resp) { | |
1785 | u32 unused_buffer; | |
1786 | ||
1787 | ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); | |
1788 | dma_unmap_single(ar->dev, resp_paddr, | |
1789 | *resp_len, DMA_FROM_DEVICE); | |
1790 | } | |
1791 | err_req: | |
1792 | dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); | |
1793 | ||
1794 | if (ret == 0 && resp_len) { | |
1795 | *resp_len = min(*resp_len, xfer.resp_len); | |
1796 | memcpy(resp, tresp, xfer.resp_len); | |
1797 | } | |
1798 | err_dma: | |
1799 | kfree(treq); | |
1800 | kfree(tresp); | |
1801 | ||
1802 | return ret; | |
1803 | } | |
1804 | ||
5440ce25 | 1805 | static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) |
5e3dd157 | 1806 | { |
5440ce25 | 1807 | struct bmi_xfer *xfer; |
5440ce25 | 1808 | |
765952e4 | 1809 | if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer)) |
5440ce25 | 1810 | return; |
5e3dd157 | 1811 | |
2374b186 | 1812 | xfer->tx_done = true; |
5e3dd157 KV |
1813 | } |
1814 | ||
/* CE receive-completion poll hook for BMI: record the response length and
 * mark the transfer's RX half done.  Called directly from
 * ath10k_pci_bmi_wait().
 */
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	/* Non-zero return means nothing completed on this ring yet. */
	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	/* Data arriving for a request that expected no response is dropped. */
	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}
1839 | ||
/* Poll both BMI copy engine rings until the exchange completes or
 * BMI_COMMUNICATION_TIMEOUT_HZ elapses.  Completions are polled directly
 * (rather than waiting on interrupts), yielding the CPU between polls.
 *
 * Returns 0 once TX is done and, when a response was requested, RX is done
 * too; -ETIMEDOUT otherwise.
 */
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		/* rx_done must equal wait_for_resp: either a response was
		 * wanted and received, or neither.
		 */
		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}
5e3dd157 KV |
1858 | |
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	/* Set the CPU interrupt bit in CORE_CTRL via read-modify-write. */
	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	/* Always succeeds; int return kept for call-site symmetry. */
	return 0;
}
1874 | ||
/* Return the number of IRAM banks for the probed device, used when
 * programming the target's early-allocation value in
 * ath10k_pci_init_config().  Keyed off PCI device ID and, for QCA6164/6174,
 * the chip revision.  Unknown hardware falls back to 1 with a warning.
 */
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		/* Unrecognized 6164/6174 revision: fall through to warning. */
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 2;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}
1906 | ||
5e3dd157 KV |
1907 | static int ath10k_pci_init_config(struct ath10k *ar) |
1908 | { | |
1909 | u32 interconnect_targ_addr; | |
1910 | u32 pcie_state_targ_addr = 0; | |
1911 | u32 pipe_cfg_targ_addr = 0; | |
1912 | u32 svc_to_pipe_map = 0; | |
1913 | u32 pcie_config_flags = 0; | |
1914 | u32 ealloc_value; | |
1915 | u32 ealloc_targ_addr; | |
1916 | u32 flag2_value; | |
1917 | u32 flag2_targ_addr; | |
1918 | int ret = 0; | |
1919 | ||
1920 | /* Download to Target the CE Config and the service-to-CE map */ | |
1921 | interconnect_targ_addr = | |
1922 | host_interest_item_address(HI_ITEM(hi_interconnect_state)); | |
1923 | ||
1924 | /* Supply Target-side CE configuration */ | |
9e264945 MK |
1925 | ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, |
1926 | &pcie_state_targ_addr); | |
5e3dd157 | 1927 | if (ret != 0) { |
7aa7a72a | 1928 | ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); |
5e3dd157 KV |
1929 | return ret; |
1930 | } | |
1931 | ||
1932 | if (pcie_state_targ_addr == 0) { | |
1933 | ret = -EIO; | |
7aa7a72a | 1934 | ath10k_err(ar, "Invalid pcie state addr\n"); |
5e3dd157 KV |
1935 | return ret; |
1936 | } | |
1937 | ||
9e264945 | 1938 | ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + |
5e3dd157 | 1939 | offsetof(struct pcie_state, |
9e264945 MK |
1940 | pipe_cfg_addr)), |
1941 | &pipe_cfg_targ_addr); | |
5e3dd157 | 1942 | if (ret != 0) { |
7aa7a72a | 1943 | ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); |
5e3dd157 KV |
1944 | return ret; |
1945 | } | |
1946 | ||
1947 | if (pipe_cfg_targ_addr == 0) { | |
1948 | ret = -EIO; | |
7aa7a72a | 1949 | ath10k_err(ar, "Invalid pipe cfg addr\n"); |
5e3dd157 KV |
1950 | return ret; |
1951 | } | |
1952 | ||
1953 | ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, | |
5b07e07f | 1954 | target_ce_config_wlan, |
050af069 VT |
1955 | sizeof(struct ce_pipe_config) * |
1956 | NUM_TARGET_CE_CONFIG_WLAN); | |
5e3dd157 KV |
1957 | |
1958 | if (ret != 0) { | |
7aa7a72a | 1959 | ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); |
5e3dd157 KV |
1960 | return ret; |
1961 | } | |
1962 | ||
9e264945 | 1963 | ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + |
5e3dd157 | 1964 | offsetof(struct pcie_state, |
9e264945 MK |
1965 | svc_to_pipe_map)), |
1966 | &svc_to_pipe_map); | |
5e3dd157 | 1967 | if (ret != 0) { |
7aa7a72a | 1968 | ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); |
5e3dd157 KV |
1969 | return ret; |
1970 | } | |
1971 | ||
1972 | if (svc_to_pipe_map == 0) { | |
1973 | ret = -EIO; | |
7aa7a72a | 1974 | ath10k_err(ar, "Invalid svc_to_pipe map\n"); |
5e3dd157 KV |
1975 | return ret; |
1976 | } | |
1977 | ||
1978 | ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, | |
5b07e07f KV |
1979 | target_service_to_ce_map_wlan, |
1980 | sizeof(target_service_to_ce_map_wlan)); | |
5e3dd157 | 1981 | if (ret != 0) { |
7aa7a72a | 1982 | ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); |
5e3dd157 KV |
1983 | return ret; |
1984 | } | |
1985 | ||
9e264945 | 1986 | ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + |
5e3dd157 | 1987 | offsetof(struct pcie_state, |
9e264945 MK |
1988 | config_flags)), |
1989 | &pcie_config_flags); | |
5e3dd157 | 1990 | if (ret != 0) { |
7aa7a72a | 1991 | ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); |
5e3dd157 KV |
1992 | return ret; |
1993 | } | |
1994 | ||
1995 | pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; | |
1996 | ||
9e264945 MK |
1997 | ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr + |
1998 | offsetof(struct pcie_state, | |
1999 | config_flags)), | |
2000 | pcie_config_flags); | |
5e3dd157 | 2001 | if (ret != 0) { |
7aa7a72a | 2002 | ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); |
5e3dd157 KV |
2003 | return ret; |
2004 | } | |
2005 | ||
2006 | /* configure early allocation */ | |
2007 | ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); | |
2008 | ||
9e264945 | 2009 | ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); |
5e3dd157 | 2010 | if (ret != 0) { |
7aa7a72a | 2011 | ath10k_err(ar, "Faile to get early alloc val: %d\n", ret); |
5e3dd157 KV |
2012 | return ret; |
2013 | } | |
2014 | ||
2015 | /* first bank is switched to IRAM */ | |
2016 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & | |
2017 | HI_EARLY_ALLOC_MAGIC_MASK); | |
d63955b3 MK |
2018 | ealloc_value |= ((ath10k_pci_get_num_banks(ar) << |
2019 | HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & | |
5e3dd157 KV |
2020 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); |
2021 | ||
9e264945 | 2022 | ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); |
5e3dd157 | 2023 | if (ret != 0) { |
7aa7a72a | 2024 | ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); |
5e3dd157 KV |
2025 | return ret; |
2026 | } | |
2027 | ||
2028 | /* Tell Target to proceed with initialization */ | |
2029 | flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); | |
2030 | ||
9e264945 | 2031 | ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); |
5e3dd157 | 2032 | if (ret != 0) { |
7aa7a72a | 2033 | ath10k_err(ar, "Failed to get option val: %d\n", ret); |
5e3dd157 KV |
2034 | return ret; |
2035 | } | |
2036 | ||
2037 | flag2_value |= HI_OPTION_EARLY_CFG_DONE; | |
2038 | ||
9e264945 | 2039 | ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); |
5e3dd157 | 2040 | if (ret != 0) { |
7aa7a72a | 2041 | ath10k_err(ar, "Failed to set option val: %d\n", ret); |
5e3dd157 KV |
2042 | return ret; |
2043 | } | |
2044 | ||
2045 | return 0; | |
2046 | } | |
2047 | ||
/* Mutates the global host/target CE configuration tables in place before
 * they are used, so it must run prior to pipe allocation and
 * ath10k_pci_init_config().
 */
static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for other feature.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &host_ce_config_wlan[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &target_ce_config_wlan[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	/* NOTE(review): index 15 is assumed to be the service entry that
	 * previously targeted CE 5 -- verify against the map definition.
	 */
	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}
2070 | ||
/* Allocate all copy engine pipes and wire up the per-pipe bookkeeping.
 * The diagnostic-window CE (CE_DIAG_PIPE) is recorded separately in
 * ar_pci->ce_diag and gets no buffer size.  Returns 0 or the first
 * allocation error.
 */
static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ar_pci->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}
2101 | ||
84cbf3a7 | 2102 | static void ath10k_pci_free_pipes(struct ath10k *ar) |
25d0dbcb MK |
2103 | { |
2104 | int i; | |
5e3dd157 | 2105 | |
25d0dbcb MK |
2106 | for (i = 0; i < CE_COUNT; i++) |
2107 | ath10k_ce_free_pipe(ar, i); | |
2108 | } | |
5e3dd157 | 2109 | |
84cbf3a7 | 2110 | static int ath10k_pci_init_pipes(struct ath10k *ar) |
5e3dd157 | 2111 | { |
84cbf3a7 | 2112 | int i, ret; |
5e3dd157 | 2113 | |
84cbf3a7 MK |
2114 | for (i = 0; i < CE_COUNT; i++) { |
2115 | ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); | |
25d0dbcb | 2116 | if (ret) { |
7aa7a72a | 2117 | ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", |
84cbf3a7 | 2118 | i, ret); |
25d0dbcb | 2119 | return ret; |
5e3dd157 | 2120 | } |
5e3dd157 KV |
2121 | } |
2122 | ||
5e3dd157 KV |
2123 | return 0; |
2124 | } | |
2125 | ||
5c771e74 | 2126 | static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) |
5e3dd157 | 2127 | { |
5c771e74 MK |
2128 | return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & |
2129 | FW_IND_EVENT_PENDING; | |
2130 | } | |
5e3dd157 | 2131 | |
5c771e74 MK |
2132 | static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) |
2133 | { | |
2134 | u32 val; | |
5e3dd157 | 2135 | |
5c771e74 MK |
2136 | val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); |
2137 | val &= ~FW_IND_EVENT_PENDING; | |
2138 | ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); | |
5e3dd157 KV |
2139 | } |
2140 | ||
/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	/* Assert SI0 reset; the read-back after the write flushes it. */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	/* 10 ms settle time between assert and de-assert. */
	msleep(10);

	/* De-assert SI0 reset, again flushed by a read-back. */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}
2160 | ||
/* Hold the target CPU in warm reset.  The firmware indicator is zeroed
 * first so stale crash/init indications don't survive the reset.
 */
static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}
2172 | ||
/* Pulse the copy engine reset line: assert, wait 10 ms, de-assert. */
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}
2186 | ||
/* Disable the SoC low-frequency timer (clear its enable bit) as part of
 * the warm reset sequence.
 */
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
fc36e3ff | 2197 | |
/* Perform a full warm reset of the target: quiesce the CPU, reset the copy
 * engines, and bring the target back to its post-init state.
 *
 * Returns 0 on success or the error from the final target-init wait.
 */
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse pci-e controller to
	 * the point of bringing host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	/* Return value deliberately ignored here: the reset sequence is
	 * repeated below and only the second wait is checked.
	 */
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
2235 | ||
6e4202c3 VT |
2236 | static int ath10k_pci_safe_chip_reset(struct ath10k *ar) |
2237 | { | |
2238 | if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) { | |
2239 | return ath10k_pci_warm_reset(ar); | |
2240 | } else if (QCA_REV_99X0(ar)) { | |
2241 | ath10k_pci_irq_disable(ar); | |
2242 | return ath10k_pci_qca99x0_chip_reset(ar); | |
2243 | } else { | |
2244 | return -ENOTSUPP; | |
2245 | } | |
2246 | } | |
2247 | ||
/* QCA988x chip reset: try warm reset up to
 * ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS times (verifying each attempt by
 * poking a host-interest register through the copy engine), then fall back
 * to cold reset unless the reset_mode module parameter forbids it.
 */
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	/* All warm reset attempts failed; cold reset is the last resort. */
	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}
2321 | ||
/* QCA6174 chip reset (also used for QCA9377 via ath10k_pci_chip_reset()):
 * cold reset followed by warm reset; per the FIXME below this hardware
 * needs both to come up reliably.
 */
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}
2353 | ||
/* QCA99x0 chip reset: a single cold reset plus target-init wait suffices
 * on this hardware family.
 */
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}
2377 | ||
d63955b3 MK |
2378 | static int ath10k_pci_chip_reset(struct ath10k *ar) |
2379 | { | |
2380 | if (QCA_REV_988X(ar)) | |
2381 | return ath10k_pci_qca988x_chip_reset(ar); | |
2382 | else if (QCA_REV_6174(ar)) | |
2383 | return ath10k_pci_qca6174_chip_reset(ar); | |
a226b519 BM |
2384 | else if (QCA_REV_9377(ar)) |
2385 | return ath10k_pci_qca6174_chip_reset(ar); | |
6e4202c3 VT |
2386 | else if (QCA_REV_99X0(ar)) |
2387 | return ath10k_pci_qca99x0_chip_reset(ar); | |
d63955b3 MK |
2388 | else |
2389 | return -ENOTSUPP; | |
2390 | } | |
2391 | ||
/* HIF .power_up callback: disable ASPM on the link, reset the chip into a
 * known state, initialize the copy engines, push the target configuration
 * and wake the target CPU.  Returns 0 or a negative errno.
 */
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	/* Save current link control and disable ASPM for the duration of
	 * bring-up (restored elsewhere from ar_pci->link_ctl).
	 */
	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		/* If firmware crashed during the reset, capture the dump
		 * before bailing out.
		 */
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}
2452 | ||
/* HIF .power_down callback: intentionally a no-op beyond logging. */
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}
2461 | ||
8cd13cad MK |
2462 | #ifdef CONFIG_PM |
2463 | ||
/* HIF .suspend callback (CONFIG_PM): force the device fully asleep before
 * the system suspends.  Always returns 0.
 */
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}
2475 | ||
/* HIF .resume callback (CONFIG_PM): wake the target when powersave is
 * disabled and re-apply the PCI retry-timeout quirk lost across
 * suspend/resume.
 */
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	/* With powersave disabled the device must be explicitly woken. */
	if (ar_pci->pci_ps == 0) {
		ret = ath10k_pci_force_wake(ar);
		if (ret) {
			ath10k_err(ar, "failed to wake up target: %d\n", ret);
			return ret;
		}
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}
2502 | #endif | |
2503 | ||
/* HIF (host interface) operations the ath10k core uses to drive the PCI
 * transport; suspend/resume are only wired up when CONFIG_PM is set.
 */
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.diag_read		= ath10k_pci_hif_diag_read,
	.diag_write		= ath10k_pci_diag_write_mem,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.read32			= ath10k_pci_read32,
	.write32		= ath10k_pci_write32,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
};
2524 | ||
2525 | static void ath10k_pci_ce_tasklet(unsigned long ptr) | |
2526 | { | |
87263e5b | 2527 | struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; |
5e3dd157 KV |
2528 | struct ath10k_pci *ar_pci = pipe->ar_pci; |
2529 | ||
2530 | ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); | |
2531 | } | |
2532 | ||
/* Bottom half for the dedicated MSI firmware-error interrupt: verify the
 * crash indication is genuine, then mask interrupts, acknowledge the
 * indication, and dump firmware crash state.
 */
static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	if (!ath10k_pci_has_fw_crashed(ar)) {
		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
		return;
	}

	ath10k_pci_irq_disable(ar);
	ath10k_pci_fw_crashed_clear(ar);
	ath10k_pci_fw_crashed_dump(ar);
}
2546 | ||
/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	/* Recover the CE index from the MSI vector offset. */
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}
2574 | ||
2575 | static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) | |
2576 | { | |
2577 | struct ath10k *ar = arg; | |
2578 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2579 | ||
2580 | tasklet_schedule(&ar_pci->msi_fw_err); | |
2581 | return IRQ_HANDLED; | |
2582 | } | |
2583 | ||
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* NOTE(review): pci_ps == 0 appears to mean this chip has no PCI
	 * power-save handling, so the device must be forced awake before
	 * touching registers — confirm against ath10k_pci_force_wake(). */
	if (ar_pci->pci_ps == 0) {
		ret = ath10k_pci_force_wake(ar);
		if (ret) {
			ath10k_warn(ar, "failed to wake device up on irq: %d\n",
				    ret);
			return IRQ_NONE;
		}
	}

	if (ar_pci->num_msi_intrs == 0) {
		/* Legacy INTx may be shared with other devices: if this
		 * device didn't raise the interrupt, decline it. */
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		/* Mask and ack the line; ath10k_pci_tasklet() re-enables
		 * it after servicing the copy engines. */
		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
2615 | ||
/* Bottom half for the top-level interrupt handler (legacy INTx and
 * single-MSI modes): handle a firmware crash if one is pending,
 * otherwise service all copy engines and re-arm the legacy interrupt.
 */
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		/* Keep interrupts masked while dumping crash state; the
		 * legacy irq is deliberately NOT re-enabled on this path. */
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}
2634 | ||
/* Request one MSI vector per interrupt source: MSI_ASSIGN_FW for
 * firmware/error events plus one vector per copy engine in
 * [MSI_ASSIGN_CE_INITIAL, MSI_ASSIGN_CE_MAX].
 *
 * Returns 0 on success or a negative errno. On failure every irq
 * requested so far is freed again, so the caller needs no cleanup.
 */
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			/* unwind the CE vectors requested so far, then the
			 * fw vector */
			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}
2667 | ||
fc15ca13 | 2668 | static int ath10k_pci_request_irq_msi(struct ath10k *ar) |
5e3dd157 KV |
2669 | { |
2670 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2671 | int ret; | |
2672 | ||
5e3dd157 KV |
2673 | ret = request_irq(ar_pci->pdev->irq, |
2674 | ath10k_pci_interrupt_handler, | |
2675 | IRQF_SHARED, "ath10k_pci", ar); | |
fc15ca13 | 2676 | if (ret) { |
7aa7a72a | 2677 | ath10k_warn(ar, "failed to request MSI irq %d: %d\n", |
fc15ca13 | 2678 | ar_pci->pdev->irq, ret); |
5e3dd157 KV |
2679 | return ret; |
2680 | } | |
2681 | ||
5e3dd157 KV |
2682 | return 0; |
2683 | } | |
2684 | ||
fc15ca13 | 2685 | static int ath10k_pci_request_irq_legacy(struct ath10k *ar) |
5e3dd157 KV |
2686 | { |
2687 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2688 | int ret; | |
2689 | ||
2690 | ret = request_irq(ar_pci->pdev->irq, | |
2691 | ath10k_pci_interrupt_handler, | |
2692 | IRQF_SHARED, "ath10k_pci", ar); | |
f3782744 | 2693 | if (ret) { |
7aa7a72a | 2694 | ath10k_warn(ar, "failed to request legacy irq %d: %d\n", |
fc15ca13 | 2695 | ar_pci->pdev->irq, ret); |
5e3dd157 | 2696 | return ret; |
f3782744 | 2697 | } |
5e3dd157 | 2698 | |
5e3dd157 KV |
2699 | return 0; |
2700 | } | |
2701 | ||
fc15ca13 MK |
2702 | static int ath10k_pci_request_irq(struct ath10k *ar) |
2703 | { | |
2704 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
5e3dd157 | 2705 | |
fc15ca13 MK |
2706 | switch (ar_pci->num_msi_intrs) { |
2707 | case 0: | |
2708 | return ath10k_pci_request_irq_legacy(ar); | |
2709 | case 1: | |
2710 | return ath10k_pci_request_irq_msi(ar); | |
b8402d82 | 2711 | default: |
fc15ca13 MK |
2712 | return ath10k_pci_request_irq_msix(ar); |
2713 | } | |
5e3dd157 KV |
2714 | } |
2715 | ||
fc15ca13 MK |
2716 | static void ath10k_pci_free_irq(struct ath10k *ar) |
2717 | { | |
2718 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2719 | int i; | |
2720 | ||
2721 | /* There's at least one interrupt irregardless whether its legacy INTR | |
2722 | * or MSI or MSI-X */ | |
2723 | for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) | |
2724 | free_irq(ar_pci->pdev->irq + i, ar); | |
2725 | } | |
2726 | ||
2727 | static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) | |
5e3dd157 KV |
2728 | { |
2729 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
5e3dd157 KV |
2730 | int i; |
2731 | ||
fc15ca13 | 2732 | tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); |
5e3dd157 | 2733 | tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, |
fc15ca13 | 2734 | (unsigned long)ar); |
5e3dd157 KV |
2735 | |
2736 | for (i = 0; i < CE_COUNT; i++) { | |
2737 | ar_pci->pipe_info[i].ar_pci = ar_pci; | |
fc15ca13 | 2738 | tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, |
5e3dd157 KV |
2739 | (unsigned long)&ar_pci->pipe_info[i]); |
2740 | } | |
fc15ca13 MK |
2741 | } |
2742 | ||
/* Choose and enable an interrupt mode for the device, in order of
 * preference: multi-vector MSI (one vector per CE plus one for
 * firmware), single MSI, legacy INTx. The result is recorded in
 * ar_pci->num_msi_intrs (>1 / 1 / 0 respectively). The irq_mode module
 * parameter can restrict the choice. Always returns 0.
 */
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
		ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
		/* min == max: demand exactly one vector per source */
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
2790 | ||
c0c378f9 | 2791 | static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) |
5e3dd157 | 2792 | { |
fc15ca13 MK |
2793 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, |
2794 | 0); | |
5e3dd157 KV |
2795 | } |
2796 | ||
fc15ca13 | 2797 | static int ath10k_pci_deinit_irq(struct ath10k *ar) |
5e3dd157 KV |
2798 | { |
2799 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
5e3dd157 | 2800 | |
fc15ca13 MK |
2801 | switch (ar_pci->num_msi_intrs) { |
2802 | case 0: | |
c0c378f9 | 2803 | ath10k_pci_deinit_irq_legacy(ar); |
b8402d82 | 2804 | break; |
bb8b621a AG |
2805 | default: |
2806 | pci_disable_msi(ar_pci->pdev); | |
b8402d82 | 2807 | break; |
fc15ca13 MK |
2808 | } |
2809 | ||
b8402d82 | 2810 | return 0; |
5e3dd157 KV |
2811 | } |
2812 | ||
/* Poll FW_INDICATOR_ADDRESS until the target reports
 * FW_IND_INITIALIZED, for at most ATH10K_PCI_TARGET_WAIT ms.
 *
 * Returns 0 on success, -EIO when the register reads back all-ones
 * (device has dropped off the bus), -ECOMM when the firmware crashed
 * during init (FW_IND_EVENT_PENDING), -ETIMEDOUT otherwise.
 */
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	/* quiesce interrupts again before inspecting the final value */
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}
2869 | ||
/* Hard-reset the target (including its PCIe core) by toggling the
 * reset bit in SOC_GLOBAL_RESET_ADDRESS. Always returns 0.
 */
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	/* bump the debugfs-visible cold reset counter */
	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing into SOC_GLOBAL_RESET to put device into
	 * reset and pulling out of reset pcie may not be stable
	 * for any immediate pcie register access and cause bus error,
	 * add delay before any pcie access request to fix this issue.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
2904 | ||
/* Acquire the PCI device: enable it, request its BAR, force 32-bit DMA
 * masks, enable bus mastering and iomap the register BAR. On any
 * failure every step taken so far is rolled back via the goto chain.
 * Paired with ath10k_pci_release(). Returns 0 or a negative errno.
 */
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}
2965 | ||
2966 | static void ath10k_pci_release(struct ath10k *ar) | |
2967 | { | |
2968 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2969 | struct pci_dev *pdev = ar_pci->pdev; | |
2970 | ||
2971 | pci_iounmap(pdev, ar_pci->mem); | |
2972 | pci_release_region(pdev, BAR_NUM); | |
2973 | pci_clear_master(pdev); | |
2974 | pci_disable_device(pdev); | |
2975 | } | |
2976 | ||
7505f7c3 MK |
2977 | static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) |
2978 | { | |
2979 | const struct ath10k_pci_supp_chip *supp_chip; | |
2980 | int i; | |
2981 | u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); | |
2982 | ||
2983 | for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { | |
2984 | supp_chip = &ath10k_pci_supp_chips[i]; | |
2985 | ||
2986 | if (supp_chip->dev_id == dev_id && | |
2987 | supp_chip->rev_id == rev_id) | |
2988 | return true; | |
2989 | } | |
2990 | ||
2991 | return false; | |
2992 | } | |
2993 | ||
2986e3ef MK |
2994 | static int ath10k_pci_probe(struct pci_dev *pdev, |
2995 | const struct pci_device_id *pci_dev) | |
2996 | { | |
2997 | int ret = 0; | |
2998 | struct ath10k *ar; | |
2999 | struct ath10k_pci *ar_pci; | |
d63955b3 | 3000 | enum ath10k_hw_rev hw_rev; |
2986e3ef | 3001 | u32 chip_id; |
1aaf8efb | 3002 | bool pci_ps; |
2986e3ef | 3003 | |
d63955b3 MK |
3004 | switch (pci_dev->device) { |
3005 | case QCA988X_2_0_DEVICE_ID: | |
3006 | hw_rev = ATH10K_HW_QCA988X; | |
1aaf8efb | 3007 | pci_ps = false; |
d63955b3 | 3008 | break; |
36582e5d | 3009 | case QCA6164_2_1_DEVICE_ID: |
d63955b3 MK |
3010 | case QCA6174_2_1_DEVICE_ID: |
3011 | hw_rev = ATH10K_HW_QCA6174; | |
1aaf8efb | 3012 | pci_ps = true; |
d63955b3 | 3013 | break; |
8bd47021 VT |
3014 | case QCA99X0_2_0_DEVICE_ID: |
3015 | hw_rev = ATH10K_HW_QCA99X0; | |
1aaf8efb | 3016 | pci_ps = false; |
8bd47021 | 3017 | break; |
a226b519 BM |
3018 | case QCA9377_1_0_DEVICE_ID: |
3019 | hw_rev = ATH10K_HW_QCA9377; | |
3020 | pci_ps = true; | |
3021 | break; | |
d63955b3 MK |
3022 | default: |
3023 | WARN_ON(1); | |
3024 | return -ENOTSUPP; | |
3025 | } | |
3026 | ||
3027 | ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, | |
3028 | hw_rev, &ath10k_pci_hif_ops); | |
2986e3ef | 3029 | if (!ar) { |
7aa7a72a | 3030 | dev_err(&pdev->dev, "failed to allocate core\n"); |
2986e3ef MK |
3031 | return -ENOMEM; |
3032 | } | |
3033 | ||
0a51b343 MP |
3034 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", |
3035 | pdev->vendor, pdev->device, | |
3036 | pdev->subsystem_vendor, pdev->subsystem_device); | |
7aa7a72a | 3037 | |
2986e3ef MK |
3038 | ar_pci = ath10k_pci_priv(ar); |
3039 | ar_pci->pdev = pdev; | |
3040 | ar_pci->dev = &pdev->dev; | |
3041 | ar_pci->ar = ar; | |
36582e5d | 3042 | ar->dev_id = pci_dev->device; |
1aaf8efb | 3043 | ar_pci->pci_ps = pci_ps; |
5e3dd157 | 3044 | |
0a51b343 MP |
3045 | ar->id.vendor = pdev->vendor; |
3046 | ar->id.device = pdev->device; | |
3047 | ar->id.subsystem_vendor = pdev->subsystem_vendor; | |
3048 | ar->id.subsystem_device = pdev->subsystem_device; | |
de57e2c8 | 3049 | |
5e3dd157 | 3050 | spin_lock_init(&ar_pci->ce_lock); |
77258d40 MK |
3051 | spin_lock_init(&ar_pci->ps_lock); |
3052 | ||
728f95ee MK |
3053 | setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, |
3054 | (unsigned long)ar); | |
77258d40 MK |
3055 | setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, |
3056 | (unsigned long)ar); | |
5e3dd157 | 3057 | |
2986e3ef | 3058 | ret = ath10k_pci_claim(ar); |
e01ae68c | 3059 | if (ret) { |
7aa7a72a | 3060 | ath10k_err(ar, "failed to claim device: %d\n", ret); |
2986e3ef | 3061 | goto err_core_destroy; |
e01ae68c KV |
3062 | } |
3063 | ||
2727a743 RH |
3064 | if (QCA_REV_6174(ar)) |
3065 | ath10k_pci_override_ce_config(ar); | |
3066 | ||
84cbf3a7 | 3067 | ret = ath10k_pci_alloc_pipes(ar); |
25d0dbcb | 3068 | if (ret) { |
7aa7a72a MK |
3069 | ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", |
3070 | ret); | |
c0c378f9 | 3071 | goto err_sleep; |
25d0dbcb MK |
3072 | } |
3073 | ||
1aaf8efb AK |
3074 | if (ar_pci->pci_ps == 0) { |
3075 | ret = ath10k_pci_force_wake(ar); | |
3076 | if (ret) { | |
3077 | ath10k_warn(ar, "failed to wake up device : %d\n", ret); | |
3078 | goto err_free_pipes; | |
3079 | } | |
3080 | } | |
3081 | ||
aa538aca RM |
3082 | ath10k_pci_ce_deinit(ar); |
3083 | ath10k_pci_irq_disable(ar); | |
3084 | ||
403d627b | 3085 | ret = ath10k_pci_init_irq(ar); |
5e3dd157 | 3086 | if (ret) { |
7aa7a72a | 3087 | ath10k_err(ar, "failed to init irqs: %d\n", ret); |
84cbf3a7 | 3088 | goto err_free_pipes; |
5e3dd157 KV |
3089 | } |
3090 | ||
7aa7a72a | 3091 | ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", |
403d627b MK |
3092 | ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, |
3093 | ath10k_pci_irq_mode, ath10k_pci_reset_mode); | |
3094 | ||
5c771e74 MK |
3095 | ret = ath10k_pci_request_irq(ar); |
3096 | if (ret) { | |
7aa7a72a | 3097 | ath10k_warn(ar, "failed to request irqs: %d\n", ret); |
5c771e74 MK |
3098 | goto err_deinit_irq; |
3099 | } | |
3100 | ||
1a7fecb7 MK |
3101 | ret = ath10k_pci_chip_reset(ar); |
3102 | if (ret) { | |
3103 | ath10k_err(ar, "failed to reset chip: %d\n", ret); | |
3104 | goto err_free_irq; | |
3105 | } | |
3106 | ||
3107 | chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); | |
3108 | if (chip_id == 0xffffffff) { | |
3109 | ath10k_err(ar, "failed to get chip id\n"); | |
3110 | goto err_free_irq; | |
3111 | } | |
3112 | ||
3113 | if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { | |
3114 | ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", | |
3115 | pdev->device, chip_id); | |
d9585a92 | 3116 | goto err_free_irq; |
1a7fecb7 MK |
3117 | } |
3118 | ||
e01ae68c | 3119 | ret = ath10k_core_register(ar, chip_id); |
5e3dd157 | 3120 | if (ret) { |
7aa7a72a | 3121 | ath10k_err(ar, "failed to register driver core: %d\n", ret); |
5c771e74 | 3122 | goto err_free_irq; |
5e3dd157 KV |
3123 | } |
3124 | ||
3125 | return 0; | |
3126 | ||
5c771e74 MK |
3127 | err_free_irq: |
3128 | ath10k_pci_free_irq(ar); | |
21396271 | 3129 | ath10k_pci_kill_tasklet(ar); |
5c771e74 | 3130 | |
403d627b MK |
3131 | err_deinit_irq: |
3132 | ath10k_pci_deinit_irq(ar); | |
3133 | ||
84cbf3a7 MK |
3134 | err_free_pipes: |
3135 | ath10k_pci_free_pipes(ar); | |
2986e3ef | 3136 | |
c0c378f9 | 3137 | err_sleep: |
0bcbbe67 | 3138 | ath10k_pci_sleep_sync(ar); |
2986e3ef MK |
3139 | ath10k_pci_release(ar); |
3140 | ||
e7b54194 | 3141 | err_core_destroy: |
5e3dd157 | 3142 | ath10k_core_destroy(ar); |
5e3dd157 KV |
3143 | |
3144 | return ret; | |
3145 | } | |
3146 | ||
3147 | static void ath10k_pci_remove(struct pci_dev *pdev) | |
3148 | { | |
3149 | struct ath10k *ar = pci_get_drvdata(pdev); | |
3150 | struct ath10k_pci *ar_pci; | |
3151 | ||
7aa7a72a | 3152 | ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); |
5e3dd157 KV |
3153 | |
3154 | if (!ar) | |
3155 | return; | |
3156 | ||
3157 | ar_pci = ath10k_pci_priv(ar); | |
3158 | ||
3159 | if (!ar_pci) | |
3160 | return; | |
3161 | ||
5e3dd157 | 3162 | ath10k_core_unregister(ar); |
5c771e74 | 3163 | ath10k_pci_free_irq(ar); |
21396271 | 3164 | ath10k_pci_kill_tasklet(ar); |
403d627b MK |
3165 | ath10k_pci_deinit_irq(ar); |
3166 | ath10k_pci_ce_deinit(ar); | |
84cbf3a7 | 3167 | ath10k_pci_free_pipes(ar); |
77258d40 | 3168 | ath10k_pci_sleep_sync(ar); |
2986e3ef | 3169 | ath10k_pci_release(ar); |
5e3dd157 | 3170 | ath10k_core_destroy(ar); |
5e3dd157 KV |
3171 | } |
3172 | ||
/* Export the id table so userspace (modprobe/udev) can autoload this
 * module for matching PCI devices. */
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};
3181 | ||
3182 | static int __init ath10k_pci_init(void) | |
3183 | { | |
3184 | int ret; | |
3185 | ||
3186 | ret = pci_register_driver(&ath10k_pci_driver); | |
3187 | if (ret) | |
7aa7a72a MK |
3188 | printk(KERN_ERR "failed to register ath10k pci driver: %d\n", |
3189 | ret); | |
5e3dd157 KV |
3190 | |
3191 | return ret; | |
3192 | } | |
3193 | module_init(ath10k_pci_init); | |
3194 | ||
/* Module exit point: unregister the PCI driver. */
static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);
3201 | ||
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");

/* Firmware and board files this driver may request at runtime; declared
 * here so tooling (e.g. initramfs generators) can include them. */

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);