1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
650b91fb 22#include <linux/bitops.h>
23
24#include "core.h"
25#include "debug.h"
26
27#include "targaddrs.h"
28#include "bmi.h"
29
30#include "hif.h"
31#include "htc.h"
32
33#include "ce.h"
34#include "pci.h"
35
36enum ath10k_pci_reset_mode {
37 ATH10K_PCI_RESET_AUTO = 0,
38 ATH10K_PCI_RESET_WARM_ONLY = 1,
39};
40
cfe9c45b 41static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
35098463 42static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
cfe9c45b 43
44module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
45MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
46
47module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
48MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
49
50/* how long to wait for the target to initialise, in ms */
51#define ATH10K_PCI_TARGET_WAIT 3000
61c95cea 52#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
0399eca8 53
9baa3c34 54static const struct pci_device_id ath10k_pci_id_table[] = {
5e3dd157 55 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
36582e5d 56 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
d63955b3 57 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
8a055a8a 58 { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
a226b519 59 { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
60 {0}
61};
62
63static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
64 /* QCA988X pre 2.0 chips are not supported because they need some nasty
65 * hacks. ath10k doesn't have them and these devices crash horribly
66 * because of that.
67 */
68 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
69
70 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
71 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
72 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
73 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
74 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
75
76 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
77 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
78 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
79 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
80 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
36582e5d 81
8a055a8a 82 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
83
84 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
12551ced 85 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
86};
87
728f95ee 88static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
fc36e3ff 89static int ath10k_pci_cold_reset(struct ath10k *ar);
6e4202c3 90static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
91static int ath10k_pci_init_irq(struct ath10k *ar);
92static int ath10k_pci_deinit_irq(struct ath10k *ar);
93static int ath10k_pci_request_irq(struct ath10k *ar);
94static void ath10k_pci_free_irq(struct ath10k *ar);
95static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
96 struct ath10k_ce_pipe *rx_pipe,
97 struct bmi_xfer *xfer);
6e4202c3 98static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
0e5b2950 99static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
9d9bdbb0 100static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
101static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
102static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
6419fdbb 103static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
afb0bf7f 104static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
5e3dd157 105
2727a743 106static struct ce_attr host_ce_config_wlan[] = {
107 /* CE0: host->target HTC control and raw streams */
108 {
109 .flags = CE_ATTR_FLAGS,
110 .src_nentries = 16,
111 .src_sz_max = 256,
112 .dest_nentries = 0,
0e5b2950 113 .send_cb = ath10k_pci_htc_tx_cb,
114 },
115
116 /* CE1: target->host HTT + HTC control */
117 {
118 .flags = CE_ATTR_FLAGS,
119 .src_nentries = 0,
63838640 120 .src_sz_max = 2048,
48e9c225 121 .dest_nentries = 512,
6419fdbb 122 .recv_cb = ath10k_pci_htt_htc_rx_cb,
123 },
124
125 /* CE2: target->host WMI */
126 {
127 .flags = CE_ATTR_FLAGS,
128 .src_nentries = 0,
129 .src_sz_max = 2048,
30abb330 130 .dest_nentries = 128,
9d9bdbb0 131 .recv_cb = ath10k_pci_htc_rx_cb,
132 },
133
134 /* CE3: host->target WMI */
135 {
136 .flags = CE_ATTR_FLAGS,
137 .src_nentries = 32,
138 .src_sz_max = 2048,
139 .dest_nentries = 0,
0e5b2950 140 .send_cb = ath10k_pci_htc_tx_cb,
141 },
142
143 /* CE4: host->target HTT */
144 {
145 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
146 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
147 .src_sz_max = 256,
148 .dest_nentries = 0,
a70587b3 149 .send_cb = ath10k_pci_htt_tx_cb,
150 },
151
a70587b3 152 /* CE5: target->host HTT (HIF->HTT) */
153 {
154 .flags = CE_ATTR_FLAGS,
155 .src_nentries = 0,
156 .src_sz_max = 512,
157 .dest_nentries = 512,
158 .recv_cb = ath10k_pci_htt_rx_cb,
159 },
160
161 /* CE6: target autonomous hif_memcpy */
162 {
163 .flags = CE_ATTR_FLAGS,
164 .src_nentries = 0,
165 .src_sz_max = 0,
166 .dest_nentries = 0,
167 },
168
169 /* CE7: ce_diag, the Diagnostic Window */
170 {
171 .flags = CE_ATTR_FLAGS,
172 .src_nentries = 2,
173 .src_sz_max = DIAG_TRANSFER_LIMIT,
174 .dest_nentries = 2,
175 },
176
177 /* CE8: target->host pktlog */
178 {
179 .flags = CE_ATTR_FLAGS,
180 .src_nentries = 0,
181 .src_sz_max = 2048,
182 .dest_nentries = 128,
afb0bf7f 183 .recv_cb = ath10k_pci_pktlog_rx_cb,
184 },
185
186 /* CE9 target autonomous qcache memcpy */
187 {
188 .flags = CE_ATTR_FLAGS,
189 .src_nentries = 0,
190 .src_sz_max = 0,
191 .dest_nentries = 0,
192 },
193
194 /* CE10: target autonomous hif memcpy */
195 {
196 .flags = CE_ATTR_FLAGS,
197 .src_nentries = 0,
198 .src_sz_max = 0,
199 .dest_nentries = 0,
200 },
201
202 /* CE11: target autonomous hif memcpy */
203 {
204 .flags = CE_ATTR_FLAGS,
205 .src_nentries = 0,
206 .src_sz_max = 0,
207 .dest_nentries = 0,
208 },
209};
210
211/* Target firmware's Copy Engine configuration. */
2727a743 212static struct ce_pipe_config target_ce_config_wlan[] = {
213 /* CE0: host->target HTC control and raw streams */
214 {
215 .pipenum = __cpu_to_le32(0),
216 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
217 .nentries = __cpu_to_le32(32),
218 .nbytes_max = __cpu_to_le32(256),
219 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
220 .reserved = __cpu_to_le32(0),
221 },
222
223 /* CE1: target->host HTT + HTC control */
224 {
225 .pipenum = __cpu_to_le32(1),
226 .pipedir = __cpu_to_le32(PIPEDIR_IN),
227 .nentries = __cpu_to_le32(32),
63838640 228 .nbytes_max = __cpu_to_le32(2048),
229 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
230 .reserved = __cpu_to_le32(0),
231 },
232
233 /* CE2: target->host WMI */
234 {
235 .pipenum = __cpu_to_le32(2),
236 .pipedir = __cpu_to_le32(PIPEDIR_IN),
30abb330 237 .nentries = __cpu_to_le32(64),
238 .nbytes_max = __cpu_to_le32(2048),
239 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
240 .reserved = __cpu_to_le32(0),
241 },
242
243 /* CE3: host->target WMI */
244 {
245 .pipenum = __cpu_to_le32(3),
246 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
247 .nentries = __cpu_to_le32(32),
248 .nbytes_max = __cpu_to_le32(2048),
249 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
250 .reserved = __cpu_to_le32(0),
251 },
252
253 /* CE4: host->target HTT */
254 {
255 .pipenum = __cpu_to_le32(4),
256 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
257 .nentries = __cpu_to_le32(256),
258 .nbytes_max = __cpu_to_le32(256),
259 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
260 .reserved = __cpu_to_le32(0),
261 },
262
5e3dd157 263 /* NB: 50% of src nentries, since tx has 2 frags */
d88effba 264
a70587b3 265 /* CE5: target->host HTT (HIF->HTT) */
d88effba 266 {
0fdc14e4 267 .pipenum = __cpu_to_le32(5),
a70587b3 268 .pipedir = __cpu_to_le32(PIPEDIR_IN),
0fdc14e4 269 .nentries = __cpu_to_le32(32),
a70587b3 270 .nbytes_max = __cpu_to_le32(512),
271 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
272 .reserved = __cpu_to_le32(0),
273 },
274
275 /* CE6: Reserved for target autonomous hif_memcpy */
276 {
277 .pipenum = __cpu_to_le32(6),
278 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
279 .nentries = __cpu_to_le32(32),
280 .nbytes_max = __cpu_to_le32(4096),
281 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
282 .reserved = __cpu_to_le32(0),
283 },
284
5e3dd157 285 /* CE7 used only by Host */
286 {
287 .pipenum = __cpu_to_le32(7),
288 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
289 .nentries = __cpu_to_le32(0),
290 .nbytes_max = __cpu_to_le32(0),
291 .flags = __cpu_to_le32(0),
292 .reserved = __cpu_to_le32(0),
293 },
294
295 /* CE8 target->host pktlog */
296 {
297 .pipenum = __cpu_to_le32(8),
298 .pipedir = __cpu_to_le32(PIPEDIR_IN),
299 .nentries = __cpu_to_le32(64),
300 .nbytes_max = __cpu_to_le32(2048),
301 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
302 .reserved = __cpu_to_le32(0),
303 },
304
305 /* CE9 target autonomous qcache memcpy */
306 {
307 .pipenum = __cpu_to_le32(9),
308 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
309 .nentries = __cpu_to_le32(32),
310 .nbytes_max = __cpu_to_le32(2048),
311 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
312 .reserved = __cpu_to_le32(0),
313 },
314
315 /* It is not necessary to send the target wlan configuration for CE10 & CE11
316 * as these CEs are not actively used in the target.
317 */
318};
319
320/*
321 * Map from service/endpoint to Copy Engine.
322 * This table is derived from the CE_PCI TABLE, above.
323 * It is passed to the Target at startup for use by firmware.
324 */
2727a743 325static struct service_to_pipe target_service_to_ce_map_wlan[] = {
d7bfb7aa 326 {
327 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
328 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
329 __cpu_to_le32(3),
330 },
331 {
332 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
333 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
334 __cpu_to_le32(2),
335 },
336 {
337 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
338 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
339 __cpu_to_le32(3),
340 },
341 {
342 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
343 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
344 __cpu_to_le32(2),
345 },
346 {
347 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
348 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
349 __cpu_to_le32(3),
350 },
351 {
352 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
353 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
354 __cpu_to_le32(2),
355 },
356 {
357 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
358 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
359 __cpu_to_le32(3),
360 },
361 {
362 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
363 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
364 __cpu_to_le32(2),
365 },
366 {
367 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
368 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
369 __cpu_to_le32(3),
370 },
371 {
372 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
373 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
374 __cpu_to_le32(2),
375 },
376 {
377 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
378 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
379 __cpu_to_le32(0),
380 },
381 {
382 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
383 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
384 __cpu_to_le32(1),
d7bfb7aa 385 },
386 { /* not used */
387 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
388 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
389 __cpu_to_le32(0),
d7bfb7aa 390 },
391 { /* not used */
392 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
393 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
394 __cpu_to_le32(1),
395 },
396 {
397 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
398 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
399 __cpu_to_le32(4),
400 },
401 {
402 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
403 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
a70587b3 404 __cpu_to_le32(5),
405 },
406
407 /* (Additions here) */
408
409 { /* must be last */
410 __cpu_to_le32(0),
411 __cpu_to_le32(0),
412 __cpu_to_le32(0),
413 },
414};
415
416static bool ath10k_pci_is_awake(struct ath10k *ar)
417{
418 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
419 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
420 RTC_STATE_ADDRESS);
421
422 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
423}
424
425static void __ath10k_pci_wake(struct ath10k *ar)
426{
427 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
428
429 lockdep_assert_held(&ar_pci->ps_lock);
430
431 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
432 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
433
434 iowrite32(PCIE_SOC_WAKE_V_MASK,
435 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
436 PCIE_SOC_WAKE_ADDRESS);
437}
438
439static void __ath10k_pci_sleep(struct ath10k *ar)
440{
441 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
442
443 lockdep_assert_held(&ar_pci->ps_lock);
444
445 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
446 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
447
448 iowrite32(PCIE_SOC_WAKE_RESET,
449 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
450 PCIE_SOC_WAKE_ADDRESS);
451 ar_pci->ps_awake = false;
452}
453
454static int ath10k_pci_wake_wait(struct ath10k *ar)
455{
456 int tot_delay = 0;
457 int curr_delay = 5;
458
459 while (tot_delay < PCIE_WAKE_TIMEOUT) {
460 if (ath10k_pci_is_awake(ar)) {
461 if (tot_delay > PCIE_WAKE_LATE_US)
462 ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
463 tot_delay / 1000);
77258d40 464 return 0;
39b91b81 465 }
466
467 udelay(curr_delay);
468 tot_delay += curr_delay;
469
470 if (curr_delay < 50)
471 curr_delay += 5;
472 }
473
474 return -ETIMEDOUT;
475}
476
477static int ath10k_pci_force_wake(struct ath10k *ar)
478{
479 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
480 unsigned long flags;
481 int ret = 0;
482
483 if (ar_pci->pci_ps)
484 return ret;
485
486 spin_lock_irqsave(&ar_pci->ps_lock, flags);
487
488 if (!ar_pci->ps_awake) {
489 iowrite32(PCIE_SOC_WAKE_V_MASK,
490 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
491 PCIE_SOC_WAKE_ADDRESS);
492
493 ret = ath10k_pci_wake_wait(ar);
494 if (ret == 0)
495 ar_pci->ps_awake = true;
496 }
497
498 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
499
500 return ret;
501}
502
503static void ath10k_pci_force_sleep(struct ath10k *ar)
504{
505 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
506 unsigned long flags;
507
508 spin_lock_irqsave(&ar_pci->ps_lock, flags);
509
510 iowrite32(PCIE_SOC_WAKE_RESET,
511 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
512 PCIE_SOC_WAKE_ADDRESS);
513 ar_pci->ps_awake = false;
514
515 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
516}
517
518static int ath10k_pci_wake(struct ath10k *ar)
519{
520 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
521 unsigned long flags;
522 int ret = 0;
523
524 if (ar_pci->pci_ps == 0)
525 return ret;
526
527 spin_lock_irqsave(&ar_pci->ps_lock, flags);
528
529 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
530 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
531
532 /* This function can be called very frequently. To avoid excessive
533 * CPU stalls for MMIO reads use a cache var to hold the device state.
534 */
535 if (!ar_pci->ps_awake) {
536 __ath10k_pci_wake(ar);
537
538 ret = ath10k_pci_wake_wait(ar);
539 if (ret == 0)
540 ar_pci->ps_awake = true;
541 }
542
543 if (ret == 0) {
544 ar_pci->ps_wake_refcount++;
545 WARN_ON(ar_pci->ps_wake_refcount == 0);
546 }
547
548 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
549
550 return ret;
551}
552
553static void ath10k_pci_sleep(struct ath10k *ar)
554{
555 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
556 unsigned long flags;
557
558 if (ar_pci->pci_ps == 0)
559 return;
560
561 spin_lock_irqsave(&ar_pci->ps_lock, flags);
562
563 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
564 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
565
566 if (WARN_ON(ar_pci->ps_wake_refcount == 0))
567 goto skip;
568
569 ar_pci->ps_wake_refcount--;
570
571 mod_timer(&ar_pci->ps_timer, jiffies +
572 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
573
574skip:
575 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
576}
577
578static void ath10k_pci_ps_timer(unsigned long ptr)
579{
580 struct ath10k *ar = (void *)ptr;
581 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
582 unsigned long flags;
583
584 spin_lock_irqsave(&ar_pci->ps_lock, flags);
585
586 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
587 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
588
589 if (ar_pci->ps_wake_refcount > 0)
590 goto skip;
591
592 __ath10k_pci_sleep(ar);
593
594skip:
595 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
596}
597
598static void ath10k_pci_sleep_sync(struct ath10k *ar)
599{
600 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
601 unsigned long flags;
602
603 if (ar_pci->pci_ps == 0) {
604 ath10k_pci_force_sleep(ar);
605 return;
606 }
607
608 del_timer_sync(&ar_pci->ps_timer);
609
610 spin_lock_irqsave(&ar_pci->ps_lock, flags);
611 WARN_ON(ar_pci->ps_wake_refcount > 0);
612 __ath10k_pci_sleep(ar);
613 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
614}
615
4ddb3299 616static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
617{
618 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
619 int ret;
620
621 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
622 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
623 offset, offset + sizeof(value), ar_pci->mem_len);
624 return;
625 }
626
627 ret = ath10k_pci_wake(ar);
628 if (ret) {
629 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
630 value, offset, ret);
631 return;
632 }
633
634 iowrite32(value, ar_pci->mem + offset);
635 ath10k_pci_sleep(ar);
636}
637
4ddb3299 638static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
639{
640 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641 u32 val;
642 int ret;
643
644 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
645 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
646 offset, offset + sizeof(val), ar_pci->mem_len);
647 return 0;
648 }
649
650 ret = ath10k_pci_wake(ar);
651 if (ret) {
652 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
653 offset, ret);
654 return 0xffffffff;
655 }
656
657 val = ioread32(ar_pci->mem + offset);
658 ath10k_pci_sleep(ar);
659
660 return val;
661}
662
663inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
664{
665 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
666
667 ar_pci->bus_ops->write32(ar, offset, value);
668}
669
670inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
671{
672 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
673
674 return ar_pci->bus_ops->read32(ar, offset);
675}
676
677u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
678{
679 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
680}
681
682void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
683{
684 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
685}
686
687u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
688{
689 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
690}
691
692void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
693{
694 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
695}
696
f52f5171 697bool ath10k_pci_irq_pending(struct ath10k *ar)
698{
699 u32 cause;
700
701 /* Check if the shared legacy irq is for us */
702 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
703 PCIE_INTR_CAUSE_ADDRESS);
704 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
705 return true;
706
707 return false;
708}
709
f52f5171 710void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
711{
712 /* IMPORTANT: INTR_CLR register has to be set after
713 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
714 * properly cleared. */
715 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
716 0);
717 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
718 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
719
720 /* IMPORTANT: this extra read transaction is required to
721 * flush the posted write buffer. */
722 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
723 PCIE_INTR_ENABLE_ADDRESS);
724}
725
f52f5171 726void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
727{
728 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
729 PCIE_INTR_ENABLE_ADDRESS,
730 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
731
732 /* IMPORTANT: this extra read transaction is required to
733 * flush the posted write buffer. */
734 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
735 PCIE_INTR_ENABLE_ADDRESS);
736}
737
403d627b 738static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
ab977bd0 739{
740 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
741
cfe9011a 742 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
403d627b 743 return "msi";
744
745 return "legacy";
746}
747
728f95ee 748static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
ab977bd0 749{
728f95ee 750 struct ath10k *ar = pipe->hif_ce_state;
ab977bd0 751 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
752 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
753 struct sk_buff *skb;
754 dma_addr_t paddr;
755 int ret;
756
757 skb = dev_alloc_skb(pipe->buf_sz);
758 if (!skb)
759 return -ENOMEM;
760
761 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
762
763 paddr = dma_map_single(ar->dev, skb->data,
764 skb->len + skb_tailroom(skb),
765 DMA_FROM_DEVICE);
766 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
7aa7a72a 767 ath10k_warn(ar, "failed to dma map pci rx buf\n");
768 dev_kfree_skb_any(skb);
769 return -EIO;
770 }
771
8582bf3b 772 ATH10K_SKB_RXCB(skb)->paddr = paddr;
728f95ee 773
ab4e3db0 774 spin_lock_bh(&ar_pci->ce_lock);
728f95ee 775 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
ab4e3db0 776 spin_unlock_bh(&ar_pci->ce_lock);
ab977bd0 777 if (ret) {
778 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
779 DMA_FROM_DEVICE);
780 dev_kfree_skb_any(skb);
781 return ret;
782 }
783
784 return 0;
785}
786
ab4e3db0 787static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
ab977bd0 788{
789 struct ath10k *ar = pipe->hif_ce_state;
790 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
791 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
792 int ret, num;
793
794 if (pipe->buf_sz == 0)
795 return;
796
797 if (!ce_pipe->dest_ring)
798 return;
799
ab4e3db0 800 spin_lock_bh(&ar_pci->ce_lock);
728f95ee 801 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
ab4e3db0 802 spin_unlock_bh(&ar_pci->ce_lock);
803
804 while (num >= 0) {
805 ret = __ath10k_pci_rx_post_buf(pipe);
806 if (ret) {
807 if (ret == -ENOSPC)
808 break;
7aa7a72a 809 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
810 mod_timer(&ar_pci->rx_post_retry, jiffies +
811 ATH10K_PCI_RX_POST_RETRY_MS);
812 break;
813 }
128abd09 814 num--;
815 }
816}
817
f52f5171 818void ath10k_pci_rx_post(struct ath10k *ar)
819{
820 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
821 int i;
822
728f95ee 823 for (i = 0; i < CE_COUNT; i++)
ab4e3db0 824 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
825}
826
f52f5171 827void ath10k_pci_rx_replenish_retry(unsigned long ptr)
828{
829 struct ath10k *ar = (void *)ptr;
830
831 ath10k_pci_rx_post(ar);
832}
833
834static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
835{
836 u32 val = 0;
837
838 switch (ar->hw_rev) {
839 case ATH10K_HW_QCA988X:
840 case ATH10K_HW_QCA6174:
a226b519 841 case ATH10K_HW_QCA9377:
842 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
843 CORE_CTRL_ADDRESS) &
3c7e256a 844 0x7ff) << 21;
845 break;
846 case ATH10K_HW_QCA99X0:
0b523ced 847 case ATH10K_HW_QCA4019:
848 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
849 break;
850 }
851
852 val |= 0x100000 | (addr & 0xfffff);
853 return val;
854}
855
856/*
857 * Diagnostic read/write access is provided for startup/config/debug usage.
858 * Caller must guarantee proper alignment, when applicable, and single user
859 * at any moment.
860 */
861static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
862 int nbytes)
863{
864 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
865 int ret = 0;
24d9ef5e 866 u32 *buf;
5e3dd157 867 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
2aa39115 868 struct ath10k_ce_pipe *ce_diag;
869 /* Host buffer address in CE space */
870 u32 ce_data;
871 dma_addr_t ce_data_base = 0;
872 void *data_buf = NULL;
873 int i;
874
875 spin_lock_bh(&ar_pci->ce_lock);
876
877 ce_diag = ar_pci->ce_diag;
878
879 /*
880 * Allocate a temporary bounce buffer to hold caller's data
881 * to be DMA'ed from Target. This guarantees
882 * 1) 4-byte alignment
883 * 2) Buffer in DMA-able space
884 */
885 orig_nbytes = nbytes;
886 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
887 orig_nbytes,
888 &ce_data_base,
889 GFP_ATOMIC);
890
891 if (!data_buf) {
892 ret = -ENOMEM;
893 goto done;
894 }
895 memset(data_buf, 0, orig_nbytes);
896
897 remaining_bytes = orig_nbytes;
898 ce_data = ce_data_base;
899 while (remaining_bytes) {
900 nbytes = min_t(unsigned int, remaining_bytes,
901 DIAG_TRANSFER_LIMIT);
902
24d9ef5e 903 ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
904 if (ret != 0)
905 goto done;
906
907 /* Request CE to send from Target(!) address to Host buffer */
908 /*
909 * The address supplied by the caller is in the
910 * Target CPU virtual address space.
911 *
912 * In order to use this address with the diagnostic CE,
913 * convert it from Target CPU virtual address space
914 * to CE address space
915 */
418ca599 916 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
5e3dd157 917
918 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
919 0);
920 if (ret)
921 goto done;
922
923 i = 0;
924 while (ath10k_ce_completed_send_next_nolock(ce_diag,
925 NULL) != 0) {
926 mdelay(1);
927 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
928 ret = -EBUSY;
929 goto done;
930 }
931 }
932
5e3dd157 933 i = 0;
934 while (ath10k_ce_completed_recv_next_nolock(ce_diag,
935 (void **)&buf,
936 &completed_nbytes)
937 != 0) {
938 mdelay(1);
939
940 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
941 ret = -EBUSY;
942 goto done;
943 }
944 }
945
946 if (nbytes != completed_nbytes) {
947 ret = -EIO;
948 goto done;
949 }
950
24d9ef5e 951 if (*buf != ce_data) {
952 ret = -EIO;
953 goto done;
954 }
955
956 remaining_bytes -= nbytes;
957 address += nbytes;
958 ce_data += nbytes;
959 }
960
961done:
962 if (ret == 0)
963 memcpy(data, data_buf, orig_nbytes);
964 else
7aa7a72a 965 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
50f87a67 966 address, ret);
967
968 if (data_buf)
969 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
970 ce_data_base);
5e3dd157 971
972 spin_unlock_bh(&ar_pci->ce_lock);
973
974 return ret;
975}
976
977static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
978{
979 __le32 val = 0;
980 int ret;
981
982 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
983 *value = __le32_to_cpu(val);
984
985 return ret;
986}
987
988static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
989 u32 src, u32 len)
990{
991 u32 host_addr, addr;
992 int ret;
993
994 host_addr = host_interest_item_address(src);
995
996 ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
997 if (ret != 0) {
7aa7a72a 998 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
999 src, ret);
1000 return ret;
1001 }
1002
1003 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1004 if (ret != 0) {
7aa7a72a 1005 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1006 addr, len, ret);
1007 return ret;
1008 }
1009
1010 return 0;
1011}
1012
1013#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
8cc7f26c 1014 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
3d29a3e0 1015
1016int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1017 const void *data, int nbytes)
1018{
1019 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1020 int ret = 0;
24d9ef5e 1021 u32 *buf;
5e3dd157 1022 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
2aa39115 1023 struct ath10k_ce_pipe *ce_diag;
1024 void *data_buf = NULL;
1025 u32 ce_data; /* Host buffer address in CE space */
1026 dma_addr_t ce_data_base = 0;
1027 int i;
1028
1029 spin_lock_bh(&ar_pci->ce_lock);
1030
1031 ce_diag = ar_pci->ce_diag;
1032
1033 /*
1034 * Allocate a temporary bounce buffer to hold caller's data
1035 * to be DMA'ed to Target. This guarantees
1036 * 1) 4-byte alignment
1037 * 2) Buffer in DMA-able space
1038 */
1039 orig_nbytes = nbytes;
1040 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
1041 orig_nbytes,
1042 &ce_data_base,
1043 GFP_ATOMIC);
1044 if (!data_buf) {
1045 ret = -ENOMEM;
1046 goto done;
1047 }
1048
1049 /* Copy caller's data to allocated DMA buf */
0fdc14e4 1050 memcpy(data_buf, data, orig_nbytes);
1051
1052 /*
1053 * The address supplied by the caller is in the
1054 * Target CPU virtual address space.
1055 *
1056 * In order to use this address with the diagnostic CE,
1057 * convert it from
1058 * Target CPU virtual address space
1059 * to
1060 * CE address space
1061 */
418ca599 1062 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1063
1064 remaining_bytes = orig_nbytes;
1065 ce_data = ce_data_base;
1066 while (remaining_bytes) {
1067 /* FIXME: check cast */
1068 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1069
1070 /* Set up to receive directly into Target(!) address */
24d9ef5e 1071 ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
1072 if (ret != 0)
1073 goto done;
1074
1075 /*
1076 * Request CE to send caller-supplied data that
1077 * was copied to bounce buffer to Target(!) address.
1078 */
1079 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
1080 nbytes, 0, 0);
1081 if (ret != 0)
1082 goto done;
1083
1084 i = 0;
1085 while (ath10k_ce_completed_send_next_nolock(ce_diag,
1086 NULL) != 0) {
1087 mdelay(1);
1088
1089 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1090 ret = -EBUSY;
1091 goto done;
1092 }
1093 }
1094
5e3dd157 1095 i = 0;
1096 while (ath10k_ce_completed_recv_next_nolock(ce_diag,
1097 (void **)&buf,
1098 &completed_nbytes)
1099 != 0) {
1100 mdelay(1);
1101
1102 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1103 ret = -EBUSY;
1104 goto done;
1105 }
1106 }
1107
1108 if (nbytes != completed_nbytes) {
1109 ret = -EIO;
1110 goto done;
1111 }
1112
24d9ef5e 1113 if (*buf != address) {
1114 ret = -EIO;
1115 goto done;
1116 }
1117
1118 remaining_bytes -= nbytes;
1119 address += nbytes;
1120 ce_data += nbytes;
1121 }
1122
1123done:
1124 if (data_buf) {
1125 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
1126 ce_data_base);
1127 }
1128
1129 if (ret != 0)
7aa7a72a 1130 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
50f87a67 1131 address, ret);
5e3dd157 1132
1133 spin_unlock_bh(&ar_pci->ce_lock);
1134
1135 return ret;
1136}
1137
1138static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1139{
1140 __le32 val = __cpu_to_le32(value);
1141
1142 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1143}
1144
5e3dd157 1145/* Called by lower (CE) layer when a send to Target completes. */
0e5b2950 1146static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1147{
1148 struct ath10k *ar = ce_state->ar;
1149 struct sk_buff_head list;
1150 struct sk_buff *skb;
5e3dd157 1151
1cb86d47 1152 __skb_queue_head_init(&list);
765952e4 1153 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
a16942e6 1154 /* no need to call tx completion for NULL pointers */
1cb86d47 1155 if (skb == NULL)
1156 continue;
1157
1cb86d47 1158 __skb_queue_tail(&list, skb);
5440ce25 1159 }
1160
1161 while ((skb = __skb_dequeue(&list)))
0e5b2950 1162 ath10k_htc_tx_completion_handler(ar, skb);
1163}
1164
1165static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1166 void (*callback)(struct ath10k *ar,
1167 struct sk_buff *skb))
1168{
1169 struct ath10k *ar = ce_state->ar;
1170 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 1171 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
5e3dd157 1172 struct sk_buff *skb;
1cb86d47 1173 struct sk_buff_head list;
5440ce25 1174 void *transfer_context;
2f5280da 1175 unsigned int nbytes, max_nbytes;
5e3dd157 1176
24d9ef5e 1179 &nbytes) == 0) {
5e3dd157 1180 skb = transfer_context;
2f5280da 1181 max_nbytes = skb->len + skb_tailroom(skb);
8582bf3b 1182 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1183 max_nbytes, DMA_FROM_DEVICE);
1184
1185 if (unlikely(max_nbytes < nbytes)) {
7aa7a72a 1186 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1187 nbytes, max_nbytes);
1188 dev_kfree_skb_any(skb);
1189 continue;
1190 }
5e3dd157 1191
2f5280da 1192 skb_put(skb, nbytes);
1193 __skb_queue_tail(&list, skb);
1194 }
a360e54c 1195
1cb86d47 1196 while ((skb = __skb_dequeue(&list))) {
1197 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1198 ce_state->id, skb->len);
1199 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1200 skb->data, skb->len);
1201
a70587b3 1202 callback(ar, skb);
2f5280da 1203 }
c29a380e 1204
728f95ee 1205 ath10k_pci_rx_post_pipe(pipe_info);
1206}
1207
1208static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1209 void (*callback)(struct ath10k *ar,
1210 struct sk_buff *skb))
1211{
1212 struct ath10k *ar = ce_state->ar;
1213 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1214 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1215 struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1216 struct sk_buff *skb;
1217 struct sk_buff_head list;
1218 void *transfer_context;
1219 unsigned int nbytes, max_nbytes, nentries;
1220 int orig_len;
1221
1222 /* No need to acquire ce_lock for CE5, since this is the only place CE5
1223 * is processed other than init and deinit. Before releasing CE5
1224 * buffers, interrupts are disabled. Thus CE5 access is serialized.
1225 */
1226 __skb_queue_head_init(&list);
1227 while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1228 &nbytes) == 0) {
1229 skb = transfer_context;
1230 max_nbytes = skb->len + skb_tailroom(skb);
1231
1232 if (unlikely(max_nbytes < nbytes)) {
1233 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1234 nbytes, max_nbytes);
1235 continue;
1236 }
1237
1238 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1239 max_nbytes, DMA_FROM_DEVICE);
1240 skb_put(skb, nbytes);
1241 __skb_queue_tail(&list, skb);
1242 }
1243
1244 nentries = skb_queue_len(&list);
1245 while ((skb = __skb_dequeue(&list))) {
1246 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1247 ce_state->id, skb->len);
1248 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1249 skb->data, skb->len);
1250
1251 orig_len = skb->len;
1252 callback(ar, skb);
1253 skb_push(skb, orig_len - skb->len);
1254 skb_reset_tail_pointer(skb);
1255 skb_trim(skb, 0);
1256
1257 /* let device gain the buffer again */
1258 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1259 skb->len + skb_tailroom(skb),
1260 DMA_FROM_DEVICE);
1261 }
1262 ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1263}
1264
1265/* Called by lower (CE) layer when data is received from the Target. */
1266static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1267{
1268 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1269}
1270
1271static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1272{
1273 /* CE4 polling needs to be done whenever CE pipe which transports
1274 * HTT Rx (target->host) is processed.
1275 */
1276 ath10k_ce_per_engine_service(ce_state->ar, 4);
1277
1278 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1279}
1280
1281/* Called by lower (CE) layer when data is received from the Target.
1282 * Only 10.4 firmware uses separate CE to transfer pktlog data.
1283 */
1284static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1285{
1286 ath10k_pci_process_rx_cb(ce_state,
1287 ath10k_htt_rx_pktlog_completion_handler);
1288}
1289
1290/* Called by lower (CE) layer when a send to HTT Target completes. */
1291static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1292{
1293 struct ath10k *ar = ce_state->ar;
1294 struct sk_buff *skb;
a70587b3 1295
765952e4 1296 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1297 /* no need to call tx completion for NULL pointers */
1298 if (!skb)
1299 continue;
1300
1301 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1302 skb->len, DMA_TO_DEVICE);
1303 ath10k_htt_hif_tx_complete(ar, skb);
1304 }
1305}
1306
1307static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1308{
1309 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1310 ath10k_htt_t2h_msg_handler(ar, skb);
1311}
1312
1313/* Called by lower (CE) layer when HTT data is received from the Target. */
1314static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1315{
1316 /* CE4 polling needs to be done whenever CE pipe which transports
1317 * HTT Rx (target->host) is processed.
1318 */
1319 ath10k_ce_per_engine_service(ce_state->ar, 4);
1320
128abd09 1321 ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1322}
1323
1324int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1325 struct ath10k_hif_sg_item *items, int n_items)
5e3dd157 1326{
5e3dd157 1327 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1328 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1329 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1330 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1331 unsigned int nentries_mask;
1332 unsigned int sw_index;
1333 unsigned int write_index;
08b8aa09 1334 int err, i = 0;
5e3dd157 1335
726346fc 1336 spin_lock_bh(&ar_pci->ce_lock);
5e3dd157 1337
1338 nentries_mask = src_ring->nentries_mask;
1339 sw_index = src_ring->sw_index;
1340 write_index = src_ring->write_index;
1341
1342 if (unlikely(CE_RING_DELTA(nentries_mask,
1343 write_index, sw_index - 1) < n_items)) {
1344 err = -ENOBUFS;
08b8aa09 1345 goto err;
726346fc 1346 }
5e3dd157 1347
726346fc 1348 for (i = 0; i < n_items - 1; i++) {
7aa7a72a 1349 ath10k_dbg(ar, ATH10K_DBG_PCI,
726346fc
MK
1350 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1351 i, items[i].paddr, items[i].len, n_items);
7aa7a72a 1352 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
726346fc 1353 items[i].vaddr, items[i].len);
5e3dd157 1354
1355 err = ath10k_ce_send_nolock(ce_pipe,
1356 items[i].transfer_context,
1357 items[i].paddr,
1358 items[i].len,
1359 items[i].transfer_id,
1360 CE_SEND_FLAG_GATHER);
1361 if (err)
08b8aa09 1362 goto err;
1363 }
1364
1365 /* `i` is equal to `n_items - 1` after for() */
1366
7aa7a72a 1367 ath10k_dbg(ar, ATH10K_DBG_PCI,
1368 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1369 i, items[i].paddr, items[i].len, n_items);
7aa7a72a 1370 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1371 items[i].vaddr, items[i].len);
1372
1373 err = ath10k_ce_send_nolock(ce_pipe,
1374 items[i].transfer_context,
1375 items[i].paddr,
1376 items[i].len,
1377 items[i].transfer_id,
1378 0);
1379 if (err)
1380 goto err;
1381
1382 spin_unlock_bh(&ar_pci->ce_lock);
1383 return 0;
1384
1385err:
1386 for (; i > 0; i--)
1387 __ath10k_ce_send_revert(ce_pipe);
726346fc 1388
1389 spin_unlock_bh(&ar_pci->ce_lock);
1390 return err;
1391}
1392
1393int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1394 size_t buf_len)
1395{
1396 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1397}
1398
f52f5171 1399u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1400{
1401 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
50f87a67 1402
7aa7a72a 1403 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
50f87a67 1404
3efcb3b4 1405 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1406}
1407
1408static void ath10k_pci_dump_registers(struct ath10k *ar,
1409 struct ath10k_fw_crash_data *crash_data)
5e3dd157 1410{
1411 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1412 int i, ret;
5e3dd157 1413
384914b2 1414 lockdep_assert_held(&ar->data_lock);
5e3dd157 1415
1416 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1417 hi_failure_state,
0fdc14e4 1418 REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1d2b48d6 1419 if (ret) {
7aa7a72a 1420 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1421 return;
1422 }
1423
1424 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1425
7aa7a72a 1426 ath10k_err(ar, "firmware register dump:\n");
5e3dd157 1427 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
7aa7a72a 1428 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
5e3dd157 1429 i,
1430 __le32_to_cpu(reg_dump_values[i]),
1431 __le32_to_cpu(reg_dump_values[i + 1]),
1432 __le32_to_cpu(reg_dump_values[i + 2]),
1433 __le32_to_cpu(reg_dump_values[i + 3]));
affd3217 1434
1435 if (!crash_data)
1436 return;
1437
384914b2 1438 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
0fdc14e4 1439 crash_data->registers[i] = reg_dump_values[i];
1440}
1441
0e9848c0 1442static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1443{
1444 struct ath10k_fw_crash_data *crash_data;
1445 char uuid[50];
1446
1447 spin_lock_bh(&ar->data_lock);
1448
1449 ar->stats.fw_crash_counter++;
1450
1451 crash_data = ath10k_debug_get_new_fw_crash_data(ar);
1452
1453 if (crash_data)
1454 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
1455 else
1456 scnprintf(uuid, sizeof(uuid), "n/a");
1457
7aa7a72a 1458 ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
8a0c797e 1459 ath10k_print_driver_info(ar);
1460 ath10k_pci_dump_registers(ar, crash_data);
1461
384914b2 1462 spin_unlock_bh(&ar->data_lock);
affd3217 1463
5e90de86 1464 queue_work(ar->workqueue, &ar->restart_work);
1465}
1466
1467void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1468 int force)
5e3dd157 1469{
7aa7a72a 1470 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
50f87a67 1471
1472 if (!force) {
1473 int resources;
1474 /*
1475 * Decide whether to actually poll for completions, or just
1476 * wait for a later chance.
1477 * If there seem to be plenty of resources left, then just wait
1478 * since checking involves reading a CE register, which is a
1479 * relatively expensive operation.
1480 */
1481 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1482
1483 /*
1484 * If at least 50% of the total resources are still available,
1485 * don't bother checking again yet.
1486 */
1487 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1488 return;
1489 }
1490 ath10k_ce_per_engine_service(ar, pipe);
1491}
1492
f52f5171 1493void ath10k_pci_kill_tasklet(struct ath10k *ar)
1494{
1495 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 1496
5e3dd157 1497 tasklet_kill(&ar_pci->intr_tq);
1498
1499 del_timer_sync(&ar_pci->rx_post_retry);
1500}
1501
1502int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1503 u8 *ul_pipe, u8 *dl_pipe)
5e3dd157 1504{
1505 const struct service_to_pipe *entry;
1506 bool ul_set = false, dl_set = false;
1507 int i;
5e3dd157 1508
7aa7a72a 1509 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
50f87a67 1510
1511 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1512 entry = &target_service_to_ce_map_wlan[i];
5e3dd157 1513
0fdc14e4 1514 if (__le32_to_cpu(entry->service_id) != service_id)
7c6aa25d 1515 continue;
5e3dd157 1516
0fdc14e4 1517 switch (__le32_to_cpu(entry->pipedir)) {
1518 case PIPEDIR_NONE:
1519 break;
1520 case PIPEDIR_IN:
1521 WARN_ON(dl_set);
0fdc14e4 1522 *dl_pipe = __le32_to_cpu(entry->pipenum);
1523 dl_set = true;
1524 break;
1525 case PIPEDIR_OUT:
1526 WARN_ON(ul_set);
0fdc14e4 1527 *ul_pipe = __le32_to_cpu(entry->pipenum);
1528 ul_set = true;
1529 break;
1530 case PIPEDIR_INOUT:
1531 WARN_ON(dl_set);
1532 WARN_ON(ul_set);
1533 *dl_pipe = __le32_to_cpu(entry->pipenum);
1534 *ul_pipe = __le32_to_cpu(entry->pipenum);
1535 dl_set = true;
1536 ul_set = true;
1537 break;
1538 }
5e3dd157 1539 }
5e3dd157 1540
1541 if (WARN_ON(!ul_set || !dl_set))
1542 return -ENOENT;
5e3dd157 1543
7c6aa25d 1544 return 0;
1545}
1546
1547void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1548 u8 *ul_pipe, u8 *dl_pipe)
5e3dd157 1549{
7aa7a72a 1550 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
50f87a67 1551
1552 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1553 ATH10K_HTC_SVC_ID_RSVD_CTRL,
400143e4 1554 ul_pipe, dl_pipe);
1555}
1556
7c0f0e3c 1557static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
5e3dd157 1558{
1559 u32 val;
1560
1561 switch (ar->hw_rev) {
1562 case ATH10K_HW_QCA988X:
1563 case ATH10K_HW_QCA6174:
a226b519 1564 case ATH10K_HW_QCA9377:
1565 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1566 CORE_CTRL_ADDRESS);
1567 val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1568 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1569 CORE_CTRL_ADDRESS, val);
1570 break;
1571 case ATH10K_HW_QCA99X0:
0b523ced 1572 case ATH10K_HW_QCA4019:
1573 /* TODO: Find appropriate register configuration for QCA99X0
1574 * to mask irq/MSI.
1575 */
1576 break;
1577 }
1578}
1579
1580static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1581{
1582 u32 val;
1583
1584 switch (ar->hw_rev) {
1585 case ATH10K_HW_QCA988X:
1586 case ATH10K_HW_QCA6174:
a226b519 1587 case ATH10K_HW_QCA9377:
1588 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1589 CORE_CTRL_ADDRESS);
1590 val |= CORE_CTRL_PCIE_REG_31_MASK;
1591 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1592 CORE_CTRL_ADDRESS, val);
1593 break;
1594 case ATH10K_HW_QCA99X0:
0b523ced 1595 case ATH10K_HW_QCA4019:
1596 /* TODO: Find appropriate register configuration for QCA99X0
1597 * to unmask irq/MSI.
1598 */
1599 break;
1600 }
7c0f0e3c 1601}
5e3dd157 1602
1603static void ath10k_pci_irq_disable(struct ath10k *ar)
1604{
ec5ba4d3 1605 ath10k_ce_disable_interrupts(ar);
e75db4e3 1606 ath10k_pci_disable_and_clear_legacy_irq(ar);
1607 ath10k_pci_irq_msi_fw_mask(ar);
1608}
1609
1610static void ath10k_pci_irq_sync(struct ath10k *ar)
1611{
1612 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 1613
cfe9011a 1614 synchronize_irq(ar_pci->pdev->irq);
1615}
1616
ec5ba4d3 1617static void ath10k_pci_irq_enable(struct ath10k *ar)
5e3dd157 1618{
ec5ba4d3 1619 ath10k_ce_enable_interrupts(ar);
e75db4e3 1620 ath10k_pci_enable_legacy_irq(ar);
7c0f0e3c 1621 ath10k_pci_irq_msi_fw_unmask(ar);
1622}
1623
1624static int ath10k_pci_hif_start(struct ath10k *ar)
1625{
76d870ed 1626 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
9a14969f 1627
7aa7a72a 1628 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
5e3dd157 1629
ec5ba4d3 1630 ath10k_pci_irq_enable(ar);
728f95ee 1631 ath10k_pci_rx_post(ar);
50f87a67 1632
1633 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1634 ar_pci->link_ctl);
1635
1636 return 0;
1637}
1638
099ac7ce 1639static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1640{
1641 struct ath10k *ar;
1642 struct ath10k_ce_pipe *ce_pipe;
1643 struct ath10k_ce_ring *ce_ring;
1644 struct sk_buff *skb;
1645 int i;
5e3dd157 1646
1647 ar = pci_pipe->hif_ce_state;
1648 ce_pipe = pci_pipe->ce_hdl;
1649 ce_ring = ce_pipe->dest_ring;
5e3dd157 1650
099ac7ce 1651 if (!ce_ring)
1652 return;
1653
1654 if (!pci_pipe->buf_sz)
1655 return;
5e3dd157 1656
1657 for (i = 0; i < ce_ring->nentries; i++) {
1658 skb = ce_ring->per_transfer_context[i];
1659 if (!skb)
1660 continue;
1661
1662 ce_ring->per_transfer_context[i] = NULL;
1663
8582bf3b 1664 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
099ac7ce 1665 skb->len + skb_tailroom(skb),
5e3dd157 1666 DMA_FROM_DEVICE);
099ac7ce 1667 dev_kfree_skb_any(skb);
1668 }
1669}
1670
099ac7ce 1671static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1672{
1673 struct ath10k *ar;
1674 struct ath10k_pci *ar_pci;
1675 struct ath10k_ce_pipe *ce_pipe;
1676 struct ath10k_ce_ring *ce_ring;
099ac7ce 1677 struct sk_buff *skb;
099ac7ce 1678 int i;
5e3dd157 1679
1680 ar = pci_pipe->hif_ce_state;
1681 ar_pci = ath10k_pci_priv(ar);
1682 ce_pipe = pci_pipe->ce_hdl;
1683 ce_ring = ce_pipe->src_ring;
5e3dd157 1684
099ac7ce 1685 if (!ce_ring)
1686 return;
1687
1688 if (!pci_pipe->buf_sz)
1689 return;
5e3dd157 1690
1691 for (i = 0; i < ce_ring->nentries; i++) {
1692 skb = ce_ring->per_transfer_context[i];
1693 if (!skb)
2415fc16 1694 continue;
2415fc16 1695
099ac7ce 1696 ce_ring->per_transfer_context[i] = NULL;
099ac7ce 1697
0e5b2950 1698 ath10k_htc_tx_completion_handler(ar, skb);
1699 }
1700}
1701
1702/*
1703 * Cleanup residual buffers for device shutdown:
1704 * buffers that were enqueued for receive
1705 * buffers that were to be sent
1706 * Note: Buffers that had completed but which were
1707 * not yet processed are on a completion queue. They
1708 * are handled when the completion thread shuts down.
1709 */
1710static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1711{
1712 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1713 int pipe_num;
1714
fad6ed78 1715 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
87263e5b 1716 struct ath10k_pci_pipe *pipe_info;
1717
1718 pipe_info = &ar_pci->pipe_info[pipe_num];
1719 ath10k_pci_rx_pipe_cleanup(pipe_info);
1720 ath10k_pci_tx_pipe_cleanup(pipe_info);
1721 }
1722}
1723
f52f5171 1724void ath10k_pci_ce_deinit(struct ath10k *ar)
5e3dd157 1725{
25d0dbcb 1726 int i;
5e3dd157 1727
1728 for (i = 0; i < CE_COUNT; i++)
1729 ath10k_ce_deinit_pipe(ar, i);
1730}
1731
f52f5171 1732void ath10k_pci_flush(struct ath10k *ar)
5e3dd157 1733{
5d1aa946 1734 ath10k_pci_kill_tasklet(ar);
1735 ath10k_pci_buffer_cleanup(ar);
1736}
5e3dd157 1737
1738static void ath10k_pci_hif_stop(struct ath10k *ar)
1739{
1740 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1741 unsigned long flags;
1742
7aa7a72a 1743 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
5e3dd157 1744
1745 /* Most likely the device has HTT Rx ring configured. The only way to
1746 * prevent the device from accessing (and possibly corrupting) host
1747 * memory is to reset the chip now.
1748 *
1749 * There's also no known way of masking MSI interrupts on the device.
1750 * For ranged MSI the CE-related interrupts can be masked. However
1751 * regardless how many MSI interrupts are assigned the first one
1752 * is always used for firmware indications (crashes) and cannot be
1753 * masked. To prevent the device from asserting the interrupt reset it
1754 * before proceeding with cleanup.
10d23db4 1755 */
6e4202c3 1756 ath10k_pci_safe_chip_reset(ar);
1757
1758 ath10k_pci_irq_disable(ar);
7c0f0e3c 1759 ath10k_pci_irq_sync(ar);
e75db4e3 1760 ath10k_pci_flush(ar);
1761
1762 spin_lock_irqsave(&ar_pci->ps_lock, flags);
1763 WARN_ON(ar_pci->ps_wake_refcount > 0);
1764 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
1765}
1766
1767int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1768 void *req, u32 req_len,
1769 void *resp, u32 *resp_len)
1770{
1771 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1772 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1773 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1774 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1775 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1776 dma_addr_t req_paddr = 0;
1777 dma_addr_t resp_paddr = 0;
1778 struct bmi_xfer xfer = {};
1779 void *treq, *tresp = NULL;
1780 int ret = 0;
1781
1782 might_sleep();
1783
1784 if (resp && !resp_len)
1785 return -EINVAL;
1786
1787 if (resp && resp_len && *resp_len == 0)
1788 return -EINVAL;
1789
1790 treq = kmemdup(req, req_len, GFP_KERNEL);
1791 if (!treq)
1792 return -ENOMEM;
1793
1794 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1795 ret = dma_mapping_error(ar->dev, req_paddr);
1796 if (ret) {
1797 ret = -EIO;
5e3dd157 1798 goto err_dma;
5e55e3cb 1799 }
5e3dd157
KV
1800
1801 if (resp && resp_len) {
1802 tresp = kzalloc(*resp_len, GFP_KERNEL);
1803 if (!tresp) {
1804 ret = -ENOMEM;
1805 goto err_req;
1806 }
1807
1808 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1809 DMA_FROM_DEVICE);
1810 ret = dma_mapping_error(ar->dev, resp_paddr);
5e55e3cb 1811 if (ret) {
22baa980 1812 ret = -EIO;
5e3dd157 1813 goto err_req;
5e55e3cb 1814 }
5e3dd157
KV
1815
1816 xfer.wait_for_resp = true;
1817 xfer.resp_len = 0;
1818
728f95ee 1819 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
5e3dd157
KV
1820 }
1821
5e3dd157
KV
1822 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1823 if (ret)
1824 goto err_resp;
1825
85622cde
MK
1826 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1827 if (ret) {
5e3dd157
KV
1828 u32 unused_buffer;
1829 unsigned int unused_nbytes;
1830 unsigned int unused_id;
1831
5e3dd157
KV
1832 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1833 &unused_nbytes, &unused_id);
1834 } else {
 1835 /* bmi_wait returned 0, i.e. the transfer completed before timing out */
1836 ret = 0;
1837 }
1838
1839err_resp:
1840 if (resp) {
1841 u32 unused_buffer;
1842
1843 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1844 dma_unmap_single(ar->dev, resp_paddr,
1845 *resp_len, DMA_FROM_DEVICE);
1846 }
1847err_req:
1848 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1849
1850 if (ret == 0 && resp_len) {
1851 *resp_len = min(*resp_len, xfer.resp_len);
1852 memcpy(resp, tresp, xfer.resp_len);
1853 }
1854err_dma:
1855 kfree(treq);
1856 kfree(tresp);
1857
1858 return ret;
1859}
1860
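/*
 * A minimal caller-side sketch of the BMI exchange above, assuming a
 * hypothetical fixed-size request/response pair (the real command encoding
 * lives in bmi.c).  It only illustrates the length contract enforced by
 * ath10k_pci_hif_exchange_bmi_msg(): *resp_len is the buffer size on input
 * and is clamped to the bytes actually received on output.
 */
static int ath10k_pci_bmi_example_xfer(struct ath10k *ar)
{
	u8 req[16] = {};		/* hypothetical request payload */
	u8 resp[64];			/* caller-provided response buffer */
	u32 resp_len = sizeof(resp);	/* in: buffer size, out: actual length */
	int ret;

	ret = ath10k_hif_exchange_bmi_msg(ar, req, sizeof(req),
					  resp, &resp_len);
	if (ret)
		return ret;

	/* here resp_len <= sizeof(resp) and resp[0..resp_len-1] is valid */
	return 0;
}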
5440ce25 1861static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
5e3dd157 1862{
5440ce25 1863 struct bmi_xfer *xfer;
5440ce25 1864
765952e4 1865 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
5440ce25 1866 return;
5e3dd157 1867
2374b186 1868 xfer->tx_done = true;
5e3dd157
KV
1869}
1870
5440ce25 1871static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
5e3dd157 1872{
7aa7a72a 1873 struct ath10k *ar = ce_state->ar;
5440ce25 1874 struct bmi_xfer *xfer;
5440ce25 1875 unsigned int nbytes;
5440ce25 1876
24d9ef5e
RM
1877 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
1878 &nbytes))
5440ce25 1879 return;
5e3dd157 1880
04ed9dfe
MK
1881 if (WARN_ON_ONCE(!xfer))
1882 return;
1883
5e3dd157 1884 if (!xfer->wait_for_resp) {
7aa7a72a 1885 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
5e3dd157
KV
1886 return;
1887 }
1888
1889 xfer->resp_len = nbytes;
2374b186 1890 xfer->rx_done = true;
5e3dd157
KV
1891}
1892
85622cde
MK
1893static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1894 struct ath10k_ce_pipe *rx_pipe,
1895 struct bmi_xfer *xfer)
1896{
1897 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1898
1899 while (time_before_eq(jiffies, timeout)) {
1900 ath10k_pci_bmi_send_done(tx_pipe);
1901 ath10k_pci_bmi_recv_data(rx_pipe);
1902
2374b186 1903 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
85622cde
MK
1904 return 0;
1905
1906 schedule();
1907 }
5e3dd157 1908
85622cde
MK
1909 return -ETIMEDOUT;
1910}
5e3dd157
KV
1911
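/*
 * ath10k_pci_bmi_wait() above is a plain jiffies-based busy poll: compute an
 * absolute deadline once, service both copy engines, and yield with
 * schedule() until the completion flags flip or the deadline passes.  The
 * same shape reduced to a sketch around a hypothetical done() predicate and
 * a made-up 100 ms budget:
 */
static int ath10k_pci_example_poll(struct ath10k *ar,
				   bool (*done)(struct ath10k *ar))
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (time_before_eq(jiffies, timeout)) {
		if (done(ar))
			return 0;

		schedule();	/* let other work run between polls */
	}

	return -ETIMEDOUT;
}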
1912/*
1913 * Send an interrupt to the device to wake up the Target CPU
1914 * so it has an opportunity to notice any changed state.
1915 */
1916static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1917{
9e264945 1918 u32 addr, val;
5e3dd157 1919
9e264945
MK
1920 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1921 val = ath10k_pci_read32(ar, addr);
1922 val |= CORE_CTRL_CPU_INTR_MASK;
1923 ath10k_pci_write32(ar, addr, val);
5e3dd157 1924
1d2b48d6 1925 return 0;
5e3dd157
KV
1926}
1927
d63955b3
MK
1928static int ath10k_pci_get_num_banks(struct ath10k *ar)
1929{
1930 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1931
1932 switch (ar_pci->pdev->device) {
1933 case QCA988X_2_0_DEVICE_ID:
8bd47021 1934 case QCA99X0_2_0_DEVICE_ID:
d63955b3 1935 return 1;
36582e5d 1936 case QCA6164_2_1_DEVICE_ID:
d63955b3
MK
1937 case QCA6174_2_1_DEVICE_ID:
1938 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1939 case QCA6174_HW_1_0_CHIP_ID_REV:
1940 case QCA6174_HW_1_1_CHIP_ID_REV:
11a002ef
MK
1941 case QCA6174_HW_2_1_CHIP_ID_REV:
1942 case QCA6174_HW_2_2_CHIP_ID_REV:
d63955b3
MK
1943 return 3;
1944 case QCA6174_HW_1_3_CHIP_ID_REV:
1945 return 2;
d63955b3
MK
1946 case QCA6174_HW_3_0_CHIP_ID_REV:
1947 case QCA6174_HW_3_1_CHIP_ID_REV:
1948 case QCA6174_HW_3_2_CHIP_ID_REV:
1949 return 9;
1950 }
1951 break;
a226b519
BM
1952 case QCA9377_1_0_DEVICE_ID:
1953 return 2;
d63955b3
MK
1954 }
1955
1956 ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1957 return 1;
1958}
1959
4ddb3299
RM
1960static int ath10k_bus_get_num_banks(struct ath10k *ar)
1961{
1962 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1963
1964 return ar_pci->bus_ops->get_num_banks(ar);
1965}
1966
f52f5171 1967int ath10k_pci_init_config(struct ath10k *ar)
5e3dd157
KV
1968{
1969 u32 interconnect_targ_addr;
1970 u32 pcie_state_targ_addr = 0;
1971 u32 pipe_cfg_targ_addr = 0;
1972 u32 svc_to_pipe_map = 0;
1973 u32 pcie_config_flags = 0;
1974 u32 ealloc_value;
1975 u32 ealloc_targ_addr;
1976 u32 flag2_value;
1977 u32 flag2_targ_addr;
1978 int ret = 0;
1979
1980 /* Download to Target the CE Config and the service-to-CE map */
1981 interconnect_targ_addr =
1982 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1983
1984 /* Supply Target-side CE configuration */
9e264945
MK
1985 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1986 &pcie_state_targ_addr);
5e3dd157 1987 if (ret != 0) {
7aa7a72a 1988 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
5e3dd157
KV
1989 return ret;
1990 }
1991
1992 if (pcie_state_targ_addr == 0) {
1993 ret = -EIO;
7aa7a72a 1994 ath10k_err(ar, "Invalid pcie state addr\n");
5e3dd157
KV
1995 return ret;
1996 }
1997
9e264945 1998 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
5e3dd157 1999 offsetof(struct pcie_state,
9e264945
MK
2000 pipe_cfg_addr)),
2001 &pipe_cfg_targ_addr);
5e3dd157 2002 if (ret != 0) {
7aa7a72a 2003 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
5e3dd157
KV
2004 return ret;
2005 }
2006
2007 if (pipe_cfg_targ_addr == 0) {
2008 ret = -EIO;
7aa7a72a 2009 ath10k_err(ar, "Invalid pipe cfg addr\n");
5e3dd157
KV
2010 return ret;
2011 }
2012
2013 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
5b07e07f 2014 target_ce_config_wlan,
050af069
VT
2015 sizeof(struct ce_pipe_config) *
2016 NUM_TARGET_CE_CONFIG_WLAN);
5e3dd157
KV
2017
2018 if (ret != 0) {
7aa7a72a 2019 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
5e3dd157
KV
2020 return ret;
2021 }
2022
9e264945 2023 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
5e3dd157 2024 offsetof(struct pcie_state,
9e264945
MK
2025 svc_to_pipe_map)),
2026 &svc_to_pipe_map);
5e3dd157 2027 if (ret != 0) {
7aa7a72a 2028 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
5e3dd157
KV
2029 return ret;
2030 }
2031
2032 if (svc_to_pipe_map == 0) {
2033 ret = -EIO;
7aa7a72a 2034 ath10k_err(ar, "Invalid svc_to_pipe map\n");
5e3dd157
KV
2035 return ret;
2036 }
2037
2038 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
5b07e07f
KV
2039 target_service_to_ce_map_wlan,
2040 sizeof(target_service_to_ce_map_wlan));
5e3dd157 2041 if (ret != 0) {
7aa7a72a 2042 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
5e3dd157
KV
2043 return ret;
2044 }
2045
9e264945 2046 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
5e3dd157 2047 offsetof(struct pcie_state,
9e264945
MK
2048 config_flags)),
2049 &pcie_config_flags);
5e3dd157 2050 if (ret != 0) {
7aa7a72a 2051 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
5e3dd157
KV
2052 return ret;
2053 }
2054
2055 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2056
9e264945
MK
2057 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2058 offsetof(struct pcie_state,
2059 config_flags)),
2060 pcie_config_flags);
5e3dd157 2061 if (ret != 0) {
7aa7a72a 2062 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
5e3dd157
KV
2063 return ret;
2064 }
2065
2066 /* configure early allocation */
2067 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2068
9e264945 2069 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
5e3dd157 2070 if (ret != 0) {
7aa7a72a 2071 ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
5e3dd157
KV
2072 return ret;
2073 }
2074
2075 /* first bank is switched to IRAM */
2076 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2077 HI_EARLY_ALLOC_MAGIC_MASK);
4ddb3299 2078 ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
d63955b3 2079 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
5e3dd157
KV
2080 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2081
9e264945 2082 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
5e3dd157 2083 if (ret != 0) {
7aa7a72a 2084 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
5e3dd157
KV
2085 return ret;
2086 }
2087
2088 /* Tell Target to proceed with initialization */
2089 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2090
9e264945 2091 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
5e3dd157 2092 if (ret != 0) {
7aa7a72a 2093 ath10k_err(ar, "Failed to get option val: %d\n", ret);
5e3dd157
KV
2094 return ret;
2095 }
2096
2097 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2098
9e264945 2099 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
5e3dd157 2100 if (ret != 0) {
7aa7a72a 2101 ath10k_err(ar, "Failed to set option val: %d\n", ret);
5e3dd157
KV
2102 return ret;
2103 }
2104
2105 return 0;
2106}
2107
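/*
 * Most of ath10k_pci_init_config() above is variations on one diagnostic
 * window read-modify-write: resolve a host interest item address, read the
 * current value through the diag copy engine, OR in a flag or field, and
 * write it back.  A sketch of that pattern using the same symbols as the
 * final step above (setting HI_OPTION_EARLY_CFG_DONE); the helper itself is
 * illustrative only:
 */
static int ath10k_pci_example_set_option_flag(struct ath10k *ar)
{
	u32 addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
	u32 val;
	int ret;

	ret = ath10k_pci_diag_read32(ar, addr, &val);
	if (ret)
		return ret;

	return ath10k_pci_diag_write32(ar, addr, val | HI_OPTION_EARLY_CFG_DONE);
}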
2727a743
RH
2108static void ath10k_pci_override_ce_config(struct ath10k *ar)
2109{
2110 struct ce_attr *attr;
2111 struct ce_pipe_config *config;
2112
2113 /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2114 * since it is currently used for another feature.
2115 */
2116
2117 /* Override Host's Copy Engine 5 configuration */
2118 attr = &host_ce_config_wlan[5];
2119 attr->src_sz_max = 0;
2120 attr->dest_nentries = 0;
2121
2122 /* Override Target firmware's Copy Engine configuration */
2123 config = &target_ce_config_wlan[5];
2124 config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2125 config->nbytes_max = __cpu_to_le32(2048);
2126
2127 /* Map from service/endpoint to Copy Engine */
2128 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2129}
2130
f52f5171 2131int ath10k_pci_alloc_pipes(struct ath10k *ar)
25d0dbcb 2132{
84cbf3a7
MK
2133 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2134 struct ath10k_pci_pipe *pipe;
25d0dbcb
MK
2135 int i, ret;
2136
2137 for (i = 0; i < CE_COUNT; i++) {
84cbf3a7
MK
2138 pipe = &ar_pci->pipe_info[i];
2139 pipe->ce_hdl = &ar_pci->ce_states[i];
2140 pipe->pipe_num = i;
2141 pipe->hif_ce_state = ar;
2142
9d9bdbb0 2143 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
25d0dbcb 2144 if (ret) {
7aa7a72a 2145 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
25d0dbcb
MK
2146 i, ret);
2147 return ret;
2148 }
84cbf3a7
MK
2149
2150 /* Last CE is Diagnostic Window */
050af069 2151 if (i == CE_DIAG_PIPE) {
84cbf3a7
MK
2152 ar_pci->ce_diag = pipe->ce_hdl;
2153 continue;
2154 }
2155
2156 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
25d0dbcb
MK
2157 }
2158
2159 return 0;
2160}
2161
f52f5171 2162void ath10k_pci_free_pipes(struct ath10k *ar)
25d0dbcb
MK
2163{
2164 int i;
5e3dd157 2165
25d0dbcb
MK
2166 for (i = 0; i < CE_COUNT; i++)
2167 ath10k_ce_free_pipe(ar, i);
2168}
5e3dd157 2169
f52f5171 2170int ath10k_pci_init_pipes(struct ath10k *ar)
5e3dd157 2171{
84cbf3a7 2172 int i, ret;
5e3dd157 2173
84cbf3a7
MK
2174 for (i = 0; i < CE_COUNT; i++) {
2175 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
25d0dbcb 2176 if (ret) {
7aa7a72a 2177 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
84cbf3a7 2178 i, ret);
25d0dbcb 2179 return ret;
5e3dd157 2180 }
5e3dd157
KV
2181 }
2182
5e3dd157
KV
2183 return 0;
2184}
2185
5c771e74 2186static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
5e3dd157 2187{
5c771e74
MK
2188 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2189 FW_IND_EVENT_PENDING;
2190}
5e3dd157 2191
5c771e74
MK
2192static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2193{
2194 u32 val;
5e3dd157 2195
5c771e74
MK
2196 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2197 val &= ~FW_IND_EVENT_PENDING;
2198 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
5e3dd157
KV
2199}
2200
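/*
 * The firmware crash indicator is a sticky bit in FW_INDICATOR_ADDRESS: the
 * check above tests FW_IND_EVENT_PENDING and the clear drops it with a
 * read-modify-write.  A sketch of a combined "test and acknowledge" helper
 * (ath10k_pci_example_ack_fw_crash is hypothetical, not part of the driver):
 */
static bool ath10k_pci_example_ack_fw_crash(struct ath10k *ar)
{
	u32 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

	if (!(val & FW_IND_EVENT_PENDING))
		return false;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
			   val & ~FW_IND_EVENT_PENDING);
	return true;
}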
de01357b
MK
2201/* this function effectively clears target memory controller assert line */
2202static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2203{
2204 u32 val;
2205
2206 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2207 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2208 val | SOC_RESET_CONTROL_SI0_RST_MASK);
2209 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2210
2211 msleep(10);
2212
2213 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2214 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2215 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2216 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2217
2218 msleep(10);
2219}
2220
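/*
 * ath10k_pci_warm_reset_si0() above pulses a single reset line: set the bit,
 * read the register back (presumably so the posted PCI write reaches the
 * device before the hold time starts), wait 10 ms, then clear the bit and
 * repeat the read-back and delay.  A sketch of the same pulse for an
 * arbitrary mask; this helper is illustrative only:
 */
static void ath10k_pci_example_pulse_reset(struct ath10k *ar, u32 mask)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, val | mask);
	(void)ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, val & ~mask);
	(void)ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	msleep(10);
}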
61c1648b 2221static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
fc36e3ff 2222{
fc36e3ff
MK
2223 u32 val;
2224
b39712ce 2225 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
fc36e3ff 2226
fc36e3ff 2227 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
61c1648b
MK
2228 SOC_RESET_CONTROL_ADDRESS);
2229 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2230 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2231}
2232
2233static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2234{
2235 u32 val;
fc36e3ff 2236
fc36e3ff
MK
2237 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2238 SOC_RESET_CONTROL_ADDRESS);
61c1648b 2239
fc36e3ff
MK
2240 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2241 val | SOC_RESET_CONTROL_CE_RST_MASK);
fc36e3ff 2242 msleep(10);
fc36e3ff
MK
2243 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2244 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
61c1648b
MK
2245}
2246
2247static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2248{
2249 u32 val;
2250
fc36e3ff 2251 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
61c1648b
MK
2252 SOC_LF_TIMER_CONTROL0_ADDRESS);
2253 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2254 SOC_LF_TIMER_CONTROL0_ADDRESS,
2255 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2256}
fc36e3ff 2257
61c1648b
MK
2258static int ath10k_pci_warm_reset(struct ath10k *ar)
2259{
2260 int ret;
2261
2262 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
de01357b 2263
61c1648b
MK
2264 spin_lock_bh(&ar->data_lock);
2265 ar->stats.fw_warm_reset_counter++;
2266 spin_unlock_bh(&ar->data_lock);
fc36e3ff 2267
61c1648b 2268 ath10k_pci_irq_disable(ar);
fc36e3ff 2269
61c1648b
MK
2270 /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2271 * were to access copy engine while host performs copy engine reset
2272 * then it is possible for the device to confuse pci-e controller to
2273 * the point of bringing host system to a complete stop (i.e. hang).
2274 */
2275 ath10k_pci_warm_reset_si0(ar);
2276 ath10k_pci_warm_reset_cpu(ar);
2277 ath10k_pci_init_pipes(ar);
2278 ath10k_pci_wait_for_target_init(ar);
fc36e3ff 2279
61c1648b
MK
2280 ath10k_pci_warm_reset_clear_lf(ar);
2281 ath10k_pci_warm_reset_ce(ar);
2282 ath10k_pci_warm_reset_cpu(ar);
2283 ath10k_pci_init_pipes(ar);
fc36e3ff 2284
61c1648b
MK
2285 ret = ath10k_pci_wait_for_target_init(ar);
2286 if (ret) {
2287 ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2288 return ret;
2289 }
fc36e3ff 2290
7aa7a72a 2291 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
fc36e3ff 2292
c0c378f9 2293 return 0;
fc36e3ff
MK
2294}
2295
6e4202c3
VT
2296static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2297{
2298 if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
2299 return ath10k_pci_warm_reset(ar);
2300 } else if (QCA_REV_99X0(ar)) {
2301 ath10k_pci_irq_disable(ar);
2302 return ath10k_pci_qca99x0_chip_reset(ar);
2303 } else {
2304 return -ENOTSUPP;
2305 }
2306}
2307
d63955b3 2308static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
0bc14d06
MK
2309{
2310 int i, ret;
2311 u32 val;
2312
d63955b3 2313 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
0bc14d06
MK
2314
 2315 /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
 2316 * It is thus preferred to use warm reset, which is safer but may not be
 2317 * able to recover the device from all possible failure scenarios.
2318 *
 2319 * Warm reset doesn't always work on the first try, so attempt it a few
2320 * times before giving up.
2321 */
2322 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2323 ret = ath10k_pci_warm_reset(ar);
2324 if (ret) {
2325 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2326 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2327 ret);
2328 continue;
2329 }
2330
2331 /* FIXME: Sometimes copy engine doesn't recover after warm
2332 * reset. In most cases this needs cold reset. In some of these
2333 * cases the device is in such a state that a cold reset may
2334 * lock up the host.
2335 *
2336 * Reading any host interest register via copy engine is
2337 * sufficient to verify if device is capable of booting
2338 * firmware blob.
2339 */
2340 ret = ath10k_pci_init_pipes(ar);
2341 if (ret) {
2342 ath10k_warn(ar, "failed to init copy engine: %d\n",
2343 ret);
2344 continue;
2345 }
2346
2347 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2348 &val);
2349 if (ret) {
2350 ath10k_warn(ar, "failed to poke copy engine: %d\n",
2351 ret);
2352 continue;
2353 }
2354
2355 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2356 return 0;
2357 }
2358
2359 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2360 ath10k_warn(ar, "refusing cold reset as requested\n");
2361 return -EPERM;
2362 }
2363
2364 ret = ath10k_pci_cold_reset(ar);
2365 if (ret) {
2366 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2367 return ret;
2368 }
2369
2370 ret = ath10k_pci_wait_for_target_init(ar);
2371 if (ret) {
2372 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2373 ret);
2374 return ret;
2375 }
2376
d63955b3
MK
2377 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2378
2379 return 0;
2380}
2381
2382static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2383{
2384 int ret;
2385
2386 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2387
2388 /* FIXME: QCA6174 requires cold + warm reset to work. */
2389
2390 ret = ath10k_pci_cold_reset(ar);
2391 if (ret) {
2392 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2393 return ret;
2394 }
2395
2396 ret = ath10k_pci_wait_for_target_init(ar);
2397 if (ret) {
2398 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
617b0f4d 2399 ret);
d63955b3
MK
2400 return ret;
2401 }
2402
2403 ret = ath10k_pci_warm_reset(ar);
2404 if (ret) {
2405 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2406 return ret;
2407 }
2408
2409 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
0bc14d06
MK
2410
2411 return 0;
2412}
2413
6e4202c3
VT
2414static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2415{
2416 int ret;
2417
2418 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2419
2420 ret = ath10k_pci_cold_reset(ar);
2421 if (ret) {
2422 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2423 return ret;
2424 }
2425
2426 ret = ath10k_pci_wait_for_target_init(ar);
2427 if (ret) {
2428 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2429 ret);
2430 return ret;
2431 }
2432
2433 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2434
2435 return 0;
2436}
2437
d63955b3
MK
2438static int ath10k_pci_chip_reset(struct ath10k *ar)
2439{
2440 if (QCA_REV_988X(ar))
2441 return ath10k_pci_qca988x_chip_reset(ar);
2442 else if (QCA_REV_6174(ar))
2443 return ath10k_pci_qca6174_chip_reset(ar);
a226b519
BM
2444 else if (QCA_REV_9377(ar))
2445 return ath10k_pci_qca6174_chip_reset(ar);
6e4202c3
VT
2446 else if (QCA_REV_99X0(ar))
2447 return ath10k_pci_qca99x0_chip_reset(ar);
d63955b3
MK
2448 else
2449 return -ENOTSUPP;
2450}
2451
0bc14d06 2452static int ath10k_pci_hif_power_up(struct ath10k *ar)
8c5c5368 2453{
76d870ed 2454 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
8c5c5368
MK
2455 int ret;
2456
0bc14d06
MK
2457 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2458
76d870ed
JD
2459 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2460 &ar_pci->link_ctl);
2461 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2462 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2463
8c5c5368
MK
2464 /*
2465 * Bring the target up cleanly.
2466 *
2467 * The target may be in an undefined state with an AUX-powered Target
2468 * and a Host in WoW mode. If the Host crashes, loses power, or is
2469 * restarted (without unloading the driver) then the Target is left
2470 * (aux) powered and running. On a subsequent driver load, the Target
2471 * is in an unexpected state. We try to catch that here in order to
2472 * reset the Target and retry the probe.
2473 */
0bc14d06 2474 ret = ath10k_pci_chip_reset(ar);
5b2589fc 2475 if (ret) {
a2fa8800
MK
2476 if (ath10k_pci_has_fw_crashed(ar)) {
2477 ath10k_warn(ar, "firmware crashed during chip reset\n");
2478 ath10k_pci_fw_crashed_clear(ar);
2479 ath10k_pci_fw_crashed_dump(ar);
2480 }
2481
0bc14d06 2482 ath10k_err(ar, "failed to reset chip: %d\n", ret);
707b1bbd 2483 goto err_sleep;
5b2589fc 2484 }
8c5c5368 2485
84cbf3a7 2486 ret = ath10k_pci_init_pipes(ar);
1d2b48d6 2487 if (ret) {
7aa7a72a 2488 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
707b1bbd 2489 goto err_sleep;
ab977bd0
MK
2490 }
2491
98563d5a
MK
2492 ret = ath10k_pci_init_config(ar);
2493 if (ret) {
7aa7a72a 2494 ath10k_err(ar, "failed to setup init config: %d\n", ret);
5c771e74 2495 goto err_ce;
98563d5a 2496 }
8c5c5368
MK
2497
2498 ret = ath10k_pci_wake_target_cpu(ar);
2499 if (ret) {
7aa7a72a 2500 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
5c771e74 2501 goto err_ce;
8c5c5368
MK
2502 }
2503
2504 return 0;
2505
2506err_ce:
2507 ath10k_pci_ce_deinit(ar);
61c95cea 2508
707b1bbd 2509err_sleep:
61c95cea
MK
2510 return ret;
2511}
2512
f52f5171 2513void ath10k_pci_hif_power_down(struct ath10k *ar)
8c5c5368 2514{
7aa7a72a 2515 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
50f87a67 2516
c011b281
MK
 2517 /* Currently hif_power_up effectively performs a reset and hif_stop
 2518 * resets the chip as well, so there's no point in resetting here.
2519 */
8c5c5368
MK
2520}
2521
8cd13cad
MK
2522#ifdef CONFIG_PM
2523
8cd13cad
MK
2524static int ath10k_pci_hif_suspend(struct ath10k *ar)
2525{
77258d40
MK
 2526 /* The grace timer can still be counting down and ar->ps_awake may still be true.
2527 * It is known that the device may be asleep after resuming regardless
2528 * of the SoC powersave state before suspending. Hence make sure the
2529 * device is asleep before proceeding.
2530 */
2531 ath10k_pci_sleep_sync(ar);
320e14b8 2532
8cd13cad
MK
2533 return 0;
2534}
2535
2536static int ath10k_pci_hif_resume(struct ath10k *ar)
2537{
2538 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2539 struct pci_dev *pdev = ar_pci->pdev;
2540 u32 val;
1aaf8efb
AK
2541 int ret = 0;
2542
d9d6a5ae
RM
2543 ret = ath10k_pci_force_wake(ar);
2544 if (ret) {
2545 ath10k_err(ar, "failed to wake up target: %d\n", ret);
2546 return ret;
1aaf8efb 2547 }
8cd13cad 2548
9ff4be96
MK
2549 /* Suspend/Resume resets the PCI configuration space, so we have to
2550 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2551 * from interfering with C3 CPU state. pci_restore_state won't help
 2552 * here since it only restores the first 64 bytes of the PCI config header.
2553 */
2554 pci_read_config_dword(pdev, 0x40, &val);
2555 if ((val & 0x0000ff00) != 0)
2556 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
8cd13cad 2557
1aaf8efb 2558 return ret;
8cd13cad
MK
2559}
2560#endif
2561
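/*
 * The resume fixup above clears the RETRY_TIMEOUT byte at PCI config offset
 * 0x41 by reading the dword at 0x40 and masking bits 15:8, since
 * pci_restore_state() does not reach that part of config space.  A sketch of
 * that read-modify-write in isolation (helper name is made up for
 * illustration):
 */
static void ath10k_pci_example_clear_retry_timeout(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, 0x40, &val);
	if (val & 0x0000ff00)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
}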
5e3dd157 2562static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
726346fc 2563 .tx_sg = ath10k_pci_hif_tx_sg,
eef25405 2564 .diag_read = ath10k_pci_hif_diag_read,
9f65ad25 2565 .diag_write = ath10k_pci_diag_write_mem,
5e3dd157
KV
2566 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2567 .start = ath10k_pci_hif_start,
2568 .stop = ath10k_pci_hif_stop,
2569 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2570 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2571 .send_complete_check = ath10k_pci_hif_send_complete_check,
5e3dd157 2572 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
8c5c5368
MK
2573 .power_up = ath10k_pci_hif_power_up,
2574 .power_down = ath10k_pci_hif_power_down,
077a3804
YL
2575 .read32 = ath10k_pci_read32,
2576 .write32 = ath10k_pci_write32,
8cd13cad
MK
2577#ifdef CONFIG_PM
2578 .suspend = ath10k_pci_hif_suspend,
2579 .resume = ath10k_pci_hif_resume,
2580#endif
5e3dd157
KV
2581};
2582
5e3dd157
KV
2583/*
2584 * Top-level interrupt handler for all PCI interrupts from a Target.
2585 * When a block of MSI interrupts is allocated, this top-level handler
2586 * is not used; instead, we directly call the correct sub-handler.
2587 */
2588static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2589{
2590 struct ath10k *ar = arg;
2591 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1aaf8efb
AK
2592 int ret;
2593
d9d6a5ae
RM
2594 ret = ath10k_pci_force_wake(ar);
2595 if (ret) {
2596 ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
2597 return IRQ_NONE;
1aaf8efb 2598 }
5e3dd157 2599
cfe9011a 2600 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
e539887b
MK
2601 if (!ath10k_pci_irq_pending(ar))
2602 return IRQ_NONE;
2603
2685218b 2604 ath10k_pci_disable_and_clear_legacy_irq(ar);
5e3dd157
KV
2605 }
2606
2607 tasklet_schedule(&ar_pci->intr_tq);
2608
2609 return IRQ_HANDLED;
2610}
2611
5c771e74 2612static void ath10k_pci_tasklet(unsigned long data)
ab977bd0
MK
2613{
2614 struct ath10k *ar = (struct ath10k *)data;
5c771e74 2615 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ab977bd0 2616
5c771e74 2617 if (ath10k_pci_has_fw_crashed(ar)) {
6f3b7ff4 2618 ath10k_pci_irq_disable(ar);
5c771e74 2619 ath10k_pci_fw_crashed_clear(ar);
0e9848c0 2620 ath10k_pci_fw_crashed_dump(ar);
ab977bd0
MK
2621 return;
2622 }
2623
5e3dd157
KV
2624 ath10k_ce_per_engine_service_any(ar);
2625
2685218b 2626 /* Re-enable legacy irq that was disabled in the irq handler */
cfe9011a 2627 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
2685218b 2628 ath10k_pci_enable_legacy_irq(ar);
5e3dd157
KV
2629}
2630
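/*
 * The interrupt handler / tasklet pair above is the usual top-half /
 * bottom-half split: the hard interrupt only wakes the device, masks the
 * (legacy) interrupt and schedules deferred work, while the tasklet does the
 * actual copy engine servicing and unmasks again.  A sketch of wiring such a
 * pair up, reusing names from this file (error handling trimmed):
 */
static int ath10k_pci_example_setup_bottom_half(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);

	return request_irq(ar_pci->pdev->irq, ath10k_pci_interrupt_handler,
			   IRQF_SHARED, "ath10k_pci", ar);
}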
fc15ca13 2631static int ath10k_pci_request_irq_msi(struct ath10k *ar)
5e3dd157
KV
2632{
2633 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2634 int ret;
2635
5e3dd157
KV
2636 ret = request_irq(ar_pci->pdev->irq,
2637 ath10k_pci_interrupt_handler,
2638 IRQF_SHARED, "ath10k_pci", ar);
fc15ca13 2639 if (ret) {
7aa7a72a 2640 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
fc15ca13 2641 ar_pci->pdev->irq, ret);
5e3dd157
KV
2642 return ret;
2643 }
2644
5e3dd157
KV
2645 return 0;
2646}
2647
fc15ca13 2648static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
5e3dd157
KV
2649{
2650 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2651 int ret;
2652
2653 ret = request_irq(ar_pci->pdev->irq,
2654 ath10k_pci_interrupt_handler,
2655 IRQF_SHARED, "ath10k_pci", ar);
f3782744 2656 if (ret) {
7aa7a72a 2657 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
fc15ca13 2658 ar_pci->pdev->irq, ret);
5e3dd157 2659 return ret;
f3782744 2660 }
5e3dd157 2661
5e3dd157
KV
2662 return 0;
2663}
2664
fc15ca13
MK
2665static int ath10k_pci_request_irq(struct ath10k *ar)
2666{
2667 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2668
cfe9011a
RM
2669 switch (ar_pci->oper_irq_mode) {
2670 case ATH10K_PCI_IRQ_LEGACY:
fc15ca13 2671 return ath10k_pci_request_irq_legacy(ar);
cfe9011a 2672 case ATH10K_PCI_IRQ_MSI:
fc15ca13 2673 return ath10k_pci_request_irq_msi(ar);
b8402d82 2674 default:
cfe9011a 2675 return -EINVAL;
fc15ca13 2676 }
5e3dd157
KV
2677}
2678
fc15ca13
MK
2679static void ath10k_pci_free_irq(struct ath10k *ar)
2680{
2681 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
fc15ca13 2682
cfe9011a 2683 free_irq(ar_pci->pdev->irq, ar);
fc15ca13
MK
2684}
2685
f52f5171 2686void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
5e3dd157
KV
2687{
2688 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2689
fc15ca13 2690 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
fc15ca13
MK
2691}
2692
2693static int ath10k_pci_init_irq(struct ath10k *ar)
2694{
2695 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2696 int ret;
5e3dd157 2697
fc15ca13 2698 ath10k_pci_init_irq_tasklets(ar);
5e3dd157 2699
403d627b 2700 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
7aa7a72a
MK
2701 ath10k_info(ar, "limiting irq mode to: %d\n",
2702 ath10k_pci_irq_mode);
5e3dd157 2703
fc15ca13 2704 /* Try MSI */
cfe9c45b 2705 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
cfe9011a 2706 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
cfe9c45b 2707 ret = pci_enable_msi(ar_pci->pdev);
5e3dd157 2708 if (ret == 0)
cfe9c45b 2709 return 0;
5e3dd157 2710
cfe9c45b 2711 /* fall-through */
5e3dd157
KV
2712 }
2713
fc15ca13
MK
2714 /* Try legacy irq
2715 *
2716 * A potential race occurs here: the CORE_BASE write
2717 * depends on the target correctly decoding the AXI address, but the
2718 * host won't know when the target writes its BAR to CORE_CTRL.
2719 * This write might get lost if the target has NOT written the BAR.
2720 * For now, fix the race by repeating the write in the
2721 * synchronization check below. */
cfe9011a 2722 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
5e3dd157 2723
fc15ca13
MK
2724 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2725 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
fc15ca13
MK
2726
2727 return 0;
5e3dd157
KV
2728}
2729
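/*
 * ath10k_pci_init_irq() above tries MSI first and silently falls back to
 * legacy INTx: attempt pci_enable_msi(), and only on failure record legacy
 * mode and unmask the legacy interrupt sources in SOC_CORE space.  A
 * condensed sketch of that decision (illustrative helper only):
 */
static void ath10k_pci_example_pick_irq_mode(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY &&
	    pci_enable_msi(ar_pci->pdev) == 0) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		return;
	}

	/* legacy fallback: unmask firmware and CE interrupt sources */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
}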
c0c378f9 2730static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
5e3dd157 2731{
fc15ca13
MK
2732 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2733 0);
5e3dd157
KV
2734}
2735
fc15ca13 2736static int ath10k_pci_deinit_irq(struct ath10k *ar)
5e3dd157
KV
2737{
2738 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2739
cfe9011a
RM
2740 switch (ar_pci->oper_irq_mode) {
2741 case ATH10K_PCI_IRQ_LEGACY:
c0c378f9 2742 ath10k_pci_deinit_irq_legacy(ar);
b8402d82 2743 break;
bb8b621a
AG
2744 default:
2745 pci_disable_msi(ar_pci->pdev);
b8402d82 2746 break;
fc15ca13
MK
2747 }
2748
b8402d82 2749 return 0;
5e3dd157
KV
2750}
2751
f52f5171 2752int ath10k_pci_wait_for_target_init(struct ath10k *ar)
5e3dd157
KV
2753{
2754 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0399eca8 2755 unsigned long timeout;
0399eca8 2756 u32 val;
5e3dd157 2757
7aa7a72a 2758 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
5e3dd157 2759
0399eca8
KV
2760 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2761
2762 do {
2763 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2764
7aa7a72a
MK
2765 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2766 val);
50f87a67 2767
0399eca8
KV
2768 /* target should never return this */
2769 if (val == 0xffffffff)
2770 continue;
2771
7710cd2e
MK
2772 /* the device has crashed so don't bother trying anymore */
2773 if (val & FW_IND_EVENT_PENDING)
2774 break;
2775
0399eca8
KV
2776 if (val & FW_IND_INITIALIZED)
2777 break;
2778
cfe9011a 2779 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
5e3dd157 2780 /* Fix potential race by repeating CORE_BASE writes */
a428249d 2781 ath10k_pci_enable_legacy_irq(ar);
0399eca8 2782
5e3dd157 2783 mdelay(10);
0399eca8 2784 } while (time_before(jiffies, timeout));
5e3dd157 2785
a428249d 2786 ath10k_pci_disable_and_clear_legacy_irq(ar);
7c0f0e3c 2787 ath10k_pci_irq_msi_fw_mask(ar);
a428249d 2788
6a4f6e1d 2789 if (val == 0xffffffff) {
7aa7a72a 2790 ath10k_err(ar, "failed to read device register, device is gone\n");
c0c378f9 2791 return -EIO;
6a4f6e1d
MK
2792 }
2793
7710cd2e 2794 if (val & FW_IND_EVENT_PENDING) {
7aa7a72a 2795 ath10k_warn(ar, "device has crashed during init\n");
c0c378f9 2796 return -ECOMM;
7710cd2e
MK
2797 }
2798
6a4f6e1d 2799 if (!(val & FW_IND_INITIALIZED)) {
7aa7a72a 2800 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
0399eca8 2801 val);
c0c378f9 2802 return -ETIMEDOUT;
5e3dd157
KV
2803 }
2804
7aa7a72a 2805 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
c0c378f9 2806 return 0;
5e3dd157
KV
2807}
2808
fc36e3ff 2809static int ath10k_pci_cold_reset(struct ath10k *ar)
5e3dd157 2810{
5e3dd157
KV
2811 u32 val;
2812
7aa7a72a 2813 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
5e3dd157 2814
f51dbe73
BG
2815 spin_lock_bh(&ar->data_lock);
2816
2817 ar->stats.fw_cold_reset_counter++;
2818
2819 spin_unlock_bh(&ar->data_lock);
2820
5e3dd157 2821 /* Put Target, including PCIe, into RESET. */
e479ed43 2822 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
5e3dd157 2823 val |= 1;
e479ed43 2824 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157 2825
acd19580
VT
2826 /* After writing into SOC_GLOBAL_RESET to put device into
2827 * reset and pulling out of reset pcie may not be stable
2828 * for any immediate pcie register access and cause bus error,
2829 * add delay before any pcie access request to fix this issue.
2830 */
2831 msleep(20);
5e3dd157
KV
2832
2833 /* Pull Target, including PCIe, out of RESET. */
2834 val &= ~1;
e479ed43 2835 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157 2836
acd19580 2837 msleep(20);
5e3dd157 2838
7aa7a72a 2839 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
50f87a67 2840
5b2589fc 2841 return 0;
5e3dd157
KV
2842}
2843
2986e3ef 2844static int ath10k_pci_claim(struct ath10k *ar)
5e3dd157 2845{
2986e3ef
MK
2846 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2847 struct pci_dev *pdev = ar_pci->pdev;
2986e3ef 2848 int ret;
5e3dd157
KV
2849
2850 pci_set_drvdata(pdev, ar);
2851
5e3dd157
KV
2852 ret = pci_enable_device(pdev);
2853 if (ret) {
7aa7a72a 2854 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2986e3ef 2855 return ret;
5e3dd157
KV
2856 }
2857
5e3dd157
KV
2858 ret = pci_request_region(pdev, BAR_NUM, "ath");
2859 if (ret) {
7aa7a72a 2860 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2986e3ef 2861 ret);
5e3dd157
KV
2862 goto err_device;
2863 }
2864
2986e3ef 2865 /* Target expects 32 bit DMA. Enforce it. */
5e3dd157
KV
2866 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2867 if (ret) {
7aa7a72a 2868 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
5e3dd157
KV
2869 goto err_region;
2870 }
2871
2872 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2873 if (ret) {
7aa7a72a 2874 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2986e3ef 2875 ret);
5e3dd157
KV
2876 goto err_region;
2877 }
2878
5e3dd157
KV
2879 pci_set_master(pdev);
2880
5e3dd157 2881 /* Arrange for access to Target SoC registers. */
aeae5b4c 2882 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
2986e3ef
MK
2883 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2884 if (!ar_pci->mem) {
7aa7a72a 2885 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
5e3dd157
KV
2886 ret = -EIO;
2887 goto err_master;
2888 }
2889
7aa7a72a 2890 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2986e3ef
MK
2891 return 0;
2892
2893err_master:
2894 pci_clear_master(pdev);
2895
2896err_region:
2897 pci_release_region(pdev, BAR_NUM);
2898
2899err_device:
2900 pci_disable_device(pdev);
2901
2902 return ret;
2903}
2904
2905static void ath10k_pci_release(struct ath10k *ar)
2906{
2907 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2908 struct pci_dev *pdev = ar_pci->pdev;
2909
2910 pci_iounmap(pdev, ar_pci->mem);
2911 pci_release_region(pdev, BAR_NUM);
2912 pci_clear_master(pdev);
2913 pci_disable_device(pdev);
2914}
2915
7505f7c3
MK
2916static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2917{
2918 const struct ath10k_pci_supp_chip *supp_chip;
2919 int i;
2920 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
2921
2922 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
2923 supp_chip = &ath10k_pci_supp_chips[i];
2924
2925 if (supp_chip->dev_id == dev_id &&
2926 supp_chip->rev_id == rev_id)
2927 return true;
2928 }
2929
2930 return false;
2931}
2932
90188f80
RM
2933int ath10k_pci_setup_resource(struct ath10k *ar)
2934{
2935 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2936 int ret;
2937
2938 spin_lock_init(&ar_pci->ce_lock);
2939 spin_lock_init(&ar_pci->ps_lock);
2940
2941 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2942 (unsigned long)ar);
2943
2944 if (QCA_REV_6174(ar))
2945 ath10k_pci_override_ce_config(ar);
2946
2947 ret = ath10k_pci_alloc_pipes(ar);
2948 if (ret) {
2949 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2950 ret);
2951 return ret;
2952 }
2953
2954 return 0;
2955}
2956
2957void ath10k_pci_release_resource(struct ath10k *ar)
2958{
2959 ath10k_pci_kill_tasklet(ar);
2960 ath10k_pci_ce_deinit(ar);
2961 ath10k_pci_free_pipes(ar);
2962}
2963
4ddb3299
RM
2964static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
2965 .read32 = ath10k_bus_pci_read32,
2966 .write32 = ath10k_bus_pci_write32,
2967 .get_num_banks = ath10k_pci_get_num_banks,
2968};
2969
2986e3ef
MK
2970static int ath10k_pci_probe(struct pci_dev *pdev,
2971 const struct pci_device_id *pci_dev)
2972{
2973 int ret = 0;
2974 struct ath10k *ar;
2975 struct ath10k_pci *ar_pci;
d63955b3 2976 enum ath10k_hw_rev hw_rev;
2986e3ef 2977 u32 chip_id;
1aaf8efb 2978 bool pci_ps;
2986e3ef 2979
d63955b3
MK
2980 switch (pci_dev->device) {
2981 case QCA988X_2_0_DEVICE_ID:
2982 hw_rev = ATH10K_HW_QCA988X;
1aaf8efb 2983 pci_ps = false;
d63955b3 2984 break;
36582e5d 2985 case QCA6164_2_1_DEVICE_ID:
d63955b3
MK
2986 case QCA6174_2_1_DEVICE_ID:
2987 hw_rev = ATH10K_HW_QCA6174;
1aaf8efb 2988 pci_ps = true;
d63955b3 2989 break;
8bd47021
VT
2990 case QCA99X0_2_0_DEVICE_ID:
2991 hw_rev = ATH10K_HW_QCA99X0;
1aaf8efb 2992 pci_ps = false;
8bd47021 2993 break;
a226b519
BM
2994 case QCA9377_1_0_DEVICE_ID:
2995 hw_rev = ATH10K_HW_QCA9377;
2996 pci_ps = true;
2997 break;
d63955b3
MK
2998 default:
2999 WARN_ON(1);
3000 return -ENOTSUPP;
3001 }
3002
3003 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3004 hw_rev, &ath10k_pci_hif_ops);
2986e3ef 3005 if (!ar) {
7aa7a72a 3006 dev_err(&pdev->dev, "failed to allocate core\n");
2986e3ef
MK
3007 return -ENOMEM;
3008 }
3009
0a51b343
MP
3010 ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3011 pdev->vendor, pdev->device,
3012 pdev->subsystem_vendor, pdev->subsystem_device);
7aa7a72a 3013
2986e3ef
MK
3014 ar_pci = ath10k_pci_priv(ar);
3015 ar_pci->pdev = pdev;
3016 ar_pci->dev = &pdev->dev;
3017 ar_pci->ar = ar;
36582e5d 3018 ar->dev_id = pci_dev->device;
1aaf8efb 3019 ar_pci->pci_ps = pci_ps;
4ddb3299 3020 ar_pci->bus_ops = &ath10k_pci_bus_ops;
5e3dd157 3021
0a51b343
MP
3022 ar->id.vendor = pdev->vendor;
3023 ar->id.device = pdev->device;
3024 ar->id.subsystem_vendor = pdev->subsystem_vendor;
3025 ar->id.subsystem_device = pdev->subsystem_device;
de57e2c8 3026
77258d40
MK
3027 setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
3028 (unsigned long)ar);
5e3dd157 3029
90188f80 3030 ret = ath10k_pci_setup_resource(ar);
e01ae68c 3031 if (ret) {
90188f80 3032 ath10k_err(ar, "failed to setup resource: %d\n", ret);
2986e3ef 3033 goto err_core_destroy;
e01ae68c
KV
3034 }
3035
90188f80 3036 ret = ath10k_pci_claim(ar);
25d0dbcb 3037 if (ret) {
90188f80
RM
3038 ath10k_err(ar, "failed to claim device: %d\n", ret);
3039 goto err_free_pipes;
25d0dbcb
MK
3040 }
3041
d9d6a5ae
RM
3042 ret = ath10k_pci_force_wake(ar);
3043 if (ret) {
3044 ath10k_warn(ar, "failed to wake up device : %d\n", ret);
90188f80 3045 goto err_sleep;
1aaf8efb
AK
3046 }
3047
aa538aca
RM
3048 ath10k_pci_ce_deinit(ar);
3049 ath10k_pci_irq_disable(ar);
3050
403d627b 3051 ret = ath10k_pci_init_irq(ar);
5e3dd157 3052 if (ret) {
7aa7a72a 3053 ath10k_err(ar, "failed to init irqs: %d\n", ret);
90188f80 3054 goto err_sleep;
5e3dd157
KV
3055 }
3056
cfe9011a
RM
3057 ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3058 ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
403d627b
MK
3059 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3060
5c771e74
MK
3061 ret = ath10k_pci_request_irq(ar);
3062 if (ret) {
7aa7a72a 3063 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
5c771e74
MK
3064 goto err_deinit_irq;
3065 }
3066
1a7fecb7
MK
3067 ret = ath10k_pci_chip_reset(ar);
3068 if (ret) {
3069 ath10k_err(ar, "failed to reset chip: %d\n", ret);
3070 goto err_free_irq;
3071 }
3072
3073 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3074 if (chip_id == 0xffffffff) {
3075 ath10k_err(ar, "failed to get chip id\n");
3076 goto err_free_irq;
3077 }
3078
3079 if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
3080 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3081 pdev->device, chip_id);
d9585a92 3082 goto err_free_irq;
1a7fecb7
MK
3083 }
3084
e01ae68c 3085 ret = ath10k_core_register(ar, chip_id);
5e3dd157 3086 if (ret) {
7aa7a72a 3087 ath10k_err(ar, "failed to register driver core: %d\n", ret);
5c771e74 3088 goto err_free_irq;
5e3dd157
KV
3089 }
3090
3091 return 0;
3092
5c771e74
MK
3093err_free_irq:
3094 ath10k_pci_free_irq(ar);
21396271 3095 ath10k_pci_kill_tasklet(ar);
5c771e74 3096
403d627b
MK
3097err_deinit_irq:
3098 ath10k_pci_deinit_irq(ar);
3099
c0c378f9 3100err_sleep:
0bcbbe67 3101 ath10k_pci_sleep_sync(ar);
2986e3ef
MK
3102 ath10k_pci_release(ar);
3103
90188f80
RM
3104err_free_pipes:
3105 ath10k_pci_free_pipes(ar);
3106
e7b54194 3107err_core_destroy:
5e3dd157 3108 ath10k_core_destroy(ar);
5e3dd157
KV
3109
3110 return ret;
3111}
3112
3113static void ath10k_pci_remove(struct pci_dev *pdev)
3114{
3115 struct ath10k *ar = pci_get_drvdata(pdev);
3116 struct ath10k_pci *ar_pci;
3117
7aa7a72a 3118 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
5e3dd157
KV
3119
3120 if (!ar)
3121 return;
3122
3123 ar_pci = ath10k_pci_priv(ar);
3124
3125 if (!ar_pci)
3126 return;
3127
5e3dd157 3128 ath10k_core_unregister(ar);
5c771e74 3129 ath10k_pci_free_irq(ar);
403d627b 3130 ath10k_pci_deinit_irq(ar);
90188f80 3131 ath10k_pci_release_resource(ar);
77258d40 3132 ath10k_pci_sleep_sync(ar);
2986e3ef 3133 ath10k_pci_release(ar);
5e3dd157 3134 ath10k_core_destroy(ar);
5e3dd157
KV
3135}
3136
5e3dd157
KV
3137MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3138
3139static struct pci_driver ath10k_pci_driver = {
3140 .name = "ath10k_pci",
3141 .id_table = ath10k_pci_id_table,
3142 .probe = ath10k_pci_probe,
3143 .remove = ath10k_pci_remove,
5e3dd157
KV
3144};
3145
3146static int __init ath10k_pci_init(void)
3147{
3148 int ret;
3149
3150 ret = pci_register_driver(&ath10k_pci_driver);
3151 if (ret)
7aa7a72a
MK
3152 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3153 ret);
5e3dd157 3154
0b523ced
RM
3155 ret = ath10k_ahb_init();
3156 if (ret)
3157 printk(KERN_ERR "ahb init failed: %d\n", ret);
3158
5e3dd157
KV
3159 return ret;
3160}
3161module_init(ath10k_pci_init);
3162
3163static void __exit ath10k_pci_exit(void)
3164{
3165 pci_unregister_driver(&ath10k_pci_driver);
0b523ced 3166 ath10k_ahb_exit();
5e3dd157
KV
3167}
3168
3169module_exit(ath10k_pci_exit);
3170
3171MODULE_AUTHOR("Qualcomm Atheros");
3172MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
3173MODULE_LICENSE("Dual BSD/GPL");
5c427f5c
BM
3174
3175/* QCA988x 2.0 firmware files */
8026cae7
BM
3176MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3177MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
5c427f5c 3178MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
53513c30 3179MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
5e3dd157 3180MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
0a51b343 3181MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
5c427f5c
BM
3182
3183/* QCA6174 2.1 firmware files */
3184MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
e451c1db 3185MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
5c427f5c 3186MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
0a51b343 3187MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
5c427f5c
BM
3188
3189/* QCA6174 3.1 firmware files */
3190MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
e451c1db 3191MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
5c427f5c 3192MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
0a51b343 3193MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
a226b519
BM
3194
3195/* QCA9377 1.0 firmware files */
3196MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3197MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);