1 /* Copyright (c) 2014 Broadcom Corporation
2 *
3 * Permission to use, copy, modify, and/or distribute this software for any
4 * purpose with or without fee is hereby granted, provided that the above
5 * copyright notice and this permission notice appear in all copies.
6 *
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/firmware.h>
19 #include <linux/pci.h>
20 #include <linux/vmalloc.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/bcma/bcma.h>
24 #include <linux/sched.h>
25 #include <asm/unaligned.h>
26
27 #include <soc.h>
28 #include <chipcommon.h>
29 #include <brcmu_utils.h>
30 #include <brcmu_wifi.h>
31 #include <brcm_hw_ids.h>
32
33 #include "debug.h"
34 #include "bus.h"
35 #include "commonring.h"
36 #include "msgbuf.h"
37 #include "pcie.h"
38 #include "firmware.h"
39 #include "chip.h"
40 #include "core.h"
41 #include "common.h"
42
43
44 enum brcmf_pcie_state {
45 BRCMFMAC_PCIE_STATE_DOWN,
46 BRCMFMAC_PCIE_STATE_UP
47 };
48
49 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
50 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
51 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
52 BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
53 BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
54 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
55 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
56 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
57 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
58 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
59 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
60 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
61
62 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
63 BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
64 BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
65 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
66 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
67 BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
68 BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
69 BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
70 BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
71 BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
72 BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
73 BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
74 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
75 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
76 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
77 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
78 BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
79 BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
80 };
81
82 #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
83
84 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
85
86 /* backplane address space accessed by BAR0 */
87 #define BRCMF_PCIE_BAR0_WINDOW 0x80
88 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
89 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
90
91 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
92 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
93
94 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
95 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
96
97 #define BRCMF_PCIE_REG_INTSTATUS 0x90
98 #define BRCMF_PCIE_REG_INTMASK 0x94
99 #define BRCMF_PCIE_REG_SBMBX 0x98
100
101 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
102
103 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
104 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
105 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
106 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
107 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
108 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
109 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
110
111 #define BRCMF_PCIE2_INTA 0x01
112 #define BRCMF_PCIE2_INTB 0x02
113
114 #define BRCMF_PCIE_INT_0 0x01
115 #define BRCMF_PCIE_INT_1 0x02
116 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
117 BRCMF_PCIE_INT_1)
118
119 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
120 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
121 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
122 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
123 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
124 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
125 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
126 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
127 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
128 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
129
130 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
131 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
132 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
133 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
134 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
135 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
136 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
137 BRCMF_PCIE_MB_INT_D2H3_DB1)
138
139 #define BRCMF_PCIE_SHARED_VERSION_7 7
140 #define BRCMF_PCIE_MIN_SHARED_VERSION 5
141 #define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
142 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
143 #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
144 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
145 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
146
147 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
148 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
149
150 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
151 #define BRCMF_SHARED_RING_BASE_OFFSET 52
152 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
153 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
154 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
155 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
156 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
157 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
158 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
159 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
160 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
161
162 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
163 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
164 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
165 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
166
167 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
168 #define BRCMF_RING_MAX_ITEM_OFFSET 4
169 #define BRCMF_RING_LEN_ITEMS_OFFSET 6
170 #define BRCMF_RING_MEM_SZ 16
171 #define BRCMF_RING_STATE_SZ 8
172
173 #define BRCMF_DEF_MAX_RXBUFPOST 255
174
175 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
176 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
177 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
178
179 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
180 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
181
182 #define BRCMF_D2H_DEV_D3_ACK 0x00000001
183 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
184 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
185 #define BRCMF_D2H_DEV_FWHALT 0x10000000
186
187 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
188 #define BRCMF_H2D_HOST_DS_ACK 0x00000002
189 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
190 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
191
192 #define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000)
193
194 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
195 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
196 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
197 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
198 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
199 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
200 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
201 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
202 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
203 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
204 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
205 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
206 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
207
208 /* Magic number at a magic location to find RAM size */
209 #define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
210 #define BRCMF_RAMSIZE_OFFSET 0x6c
211
212
213 struct brcmf_pcie_console {
214 u32 base_addr;
215 u32 buf_addr;
216 u32 bufsize;
217 u32 read_idx;
218 u8 log_str[256];
219 u8 log_idx;
220 };
221
222 struct brcmf_pcie_shared_info {
223 u32 tcm_base_address;
224 u32 flags;
225 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
226 struct brcmf_pcie_ringbuf *flowrings;
227 u16 max_rxbufpost;
228 u16 max_flowrings;
229 u16 max_submissionrings;
230 u16 max_completionrings;
231 u32 rx_dataoffset;
232 u32 htod_mb_data_addr;
233 u32 dtoh_mb_data_addr;
234 u32 ring_info_addr;
235 struct brcmf_pcie_console console;
236 void *scratch;
237 dma_addr_t scratch_dmahandle;
238 void *ringupd;
239 dma_addr_t ringupd_dmahandle;
240 u8 version;
241 };
242
243 struct brcmf_pcie_core_info {
244 u32 base;
245 u32 wrapbase;
246 };
247
248 struct brcmf_pciedev_info {
249 enum brcmf_pcie_state state;
250 bool in_irq;
251 struct pci_dev *pdev;
252 char fw_name[BRCMF_FW_NAME_LEN];
253 char nvram_name[BRCMF_FW_NAME_LEN];
254 void __iomem *regs;
255 void __iomem *tcm;
256 u32 ram_base;
257 u32 ram_size;
258 struct brcmf_chip *ci;
259 u32 coreid;
260 struct brcmf_pcie_shared_info shared;
261 wait_queue_head_t mbdata_resp_wait;
262 bool mbdata_completed;
263 bool irq_allocated;
264 bool wowl_enabled;
265 u8 dma_idx_sz;
266 void *idxbuf;
267 u32 idxbuf_sz;
268 dma_addr_t idxbuf_dmahandle;
269 u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
270 void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
271 u16 value);
272 struct brcmf_mp_device *settings;
273 };
274
275 struct brcmf_pcie_ringbuf {
276 struct brcmf_commonring commonring;
277 dma_addr_t dma_handle;
278 u32 w_idx_addr;
279 u32 r_idx_addr;
280 struct brcmf_pciedev_info *devinfo;
281 u8 id;
282 };
283
284 /**
285 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
286 *
287 * @ringmem: dongle memory pointer to ring memory location
288 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
289 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
290 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
291 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
292 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
293 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
294 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
295 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
296 * @max_flowrings: maximum number of tx flow rings supported.
297 * @max_submissionrings: maximum number of submission rings (h2d) supported.
298 * @max_completionrings: maximum number of completion rings (d2h) supported.
299 */
300 struct brcmf_pcie_dhi_ringinfo {
301 __le32 ringmem;
302 __le32 h2d_w_idx_ptr;
303 __le32 h2d_r_idx_ptr;
304 __le32 d2h_w_idx_ptr;
305 __le32 d2h_r_idx_ptr;
306 struct msgbuf_buf_addr h2d_w_idx_hostaddr;
307 struct msgbuf_buf_addr h2d_r_idx_hostaddr;
308 struct msgbuf_buf_addr d2h_w_idx_hostaddr;
309 struct msgbuf_buf_addr d2h_r_idx_hostaddr;
310 __le16 max_flowrings;
311 __le16 max_submissionrings;
312 __le16 max_completionrings;
313 };
314
315 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
316 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
317 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
318 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
319 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
320 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
321 };
322
323 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
324 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
325 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
326 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
327 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
328 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
329 };
330
331 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
332 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
333 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
334 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
335 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
336 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
337 };
338
339
340 static u32
341 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
342 {
343 void __iomem *address = devinfo->regs + reg_offset;
344
345 return (ioread32(address));
346 }
347
348
349 static void
350 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
351 u32 value)
352 {
353 void __iomem *address = devinfo->regs + reg_offset;
354
355 iowrite32(value, address);
356 }
357
358
359 static u8
360 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
361 {
362 void __iomem *address = devinfo->tcm + mem_offset;
363
364 return (ioread8(address));
365 }
366
367
368 static u16
369 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
370 {
371 void __iomem *address = devinfo->tcm + mem_offset;
372
373 return (ioread16(address));
374 }
375
376
377 static void
378 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
379 u16 value)
380 {
381 void __iomem *address = devinfo->tcm + mem_offset;
382
383 iowrite16(value, address);
384 }
385
386
387 static u16
388 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
389 {
390 u16 *address = devinfo->idxbuf + mem_offset;
391
392 return (*(address));
393 }
394
395
396 static void
397 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
398 u16 value)
399 {
400 u16 *address = devinfo->idxbuf + mem_offset;
401
402 *(address) = value;
403 }
404
405
406 static u32
407 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
408 {
409 void __iomem *address = devinfo->tcm + mem_offset;
410
411 return (ioread32(address));
412 }
413
414
415 static void
416 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
417 u32 value)
418 {
419 void __iomem *address = devinfo->tcm + mem_offset;
420
421 iowrite32(value, address);
422 }
423
424
425 static u32
426 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
427 {
428 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
429
430 return (ioread32(addr));
431 }
432
433
434 static void
435 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
436 u32 value)
437 {
438 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
439
440 iowrite32(value, addr);
441 }
442
443
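/* Copy a host buffer into device memory (TCM). A 32-bit, 16-bit or
 * byte-wide MMIO copy is chosen based on bits 2 and 1 of the source
 * and destination addresses and of the length.
 */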
444 static void
445 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
446 void *srcaddr, u32 len)
447 {
448 void __iomem *address = devinfo->tcm + mem_offset;
449 __le32 *src32;
450 __le16 *src16;
451 u8 *src8;
452
453 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
454 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
455 src8 = (u8 *)srcaddr;
456 while (len) {
457 iowrite8(*src8, address);
458 address++;
459 src8++;
460 len--;
461 }
462 } else {
463 len = len / 2;
464 src16 = (__le16 *)srcaddr;
465 while (len) {
466 iowrite16(le16_to_cpu(*src16), address);
467 address += 2;
468 src16++;
469 len--;
470 }
471 }
472 } else {
473 len = len / 4;
474 src32 = (__le32 *)srcaddr;
475 while (len) {
476 iowrite32(le32_to_cpu(*src32), address);
477 address += 4;
478 src32++;
479 len--;
480 }
481 }
482 }
483
484
485 static void
486 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
487 void *dstaddr, u32 len)
488 {
489 void __iomem *address = devinfo->tcm + mem_offset;
490 __le32 *dst32;
491 __le16 *dst16;
492 u8 *dst8;
493
494 if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
495 if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
496 dst8 = (u8 *)dstaddr;
497 while (len) {
498 *dst8 = ioread8(address);
499 address++;
500 dst8++;
501 len--;
502 }
503 } else {
504 len = len / 2;
505 dst16 = (__le16 *)dstaddr;
506 while (len) {
507 *dst16 = cpu_to_le16(ioread16(address));
508 address += 2;
509 dst16++;
510 len--;
511 }
512 }
513 } else {
514 len = len / 4;
515 dst32 = (__le32 *)dstaddr;
516 while (len) {
517 *dst32 = cpu_to_le32(ioread32(address));
518 address += 4;
519 dst32++;
520 len--;
521 }
522 }
523 }
524
525
526 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
527 CHIPCREGOFFS(reg), value)
528
529
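/* Map the register space of the requested backplane core into BAR0 by
 * programming the BAR0 window register in PCI config space. The window
 * is read back and written once more if the first write did not stick.
 */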
530 static void
531 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
532 {
533 const struct pci_dev *pdev = devinfo->pdev;
534 struct brcmf_core *core;
535 u32 bar0_win;
536
537 core = brcmf_chip_get_core(devinfo->ci, coreid);
538 if (core) {
539 bar0_win = core->base;
540 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
541 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
542 &bar0_win) == 0) {
543 if (bar0_win != core->base) {
544 bar0_win = core->base;
545 pci_write_config_dword(pdev,
546 BRCMF_PCIE_BAR0_WINDOW,
547 bar0_win);
548 }
549 }
550 } else {
551 brcmf_err("Unsupported core selected %x\n", coreid);
552 }
553 }
554
555
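/* Reset the device through the chipcommon watchdog with ASPM disabled
 * around the reset. For PCIe core revisions <= 13 a set of PCI config
 * registers is read and written back through the indirect
 * CONFIGADDR/CONFIGDATA interface, apparently to refresh them after
 * the reset.
 */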
556 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
557 {
558 struct brcmf_core *core;
559 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
560 BRCMF_PCIE_CFGREG_PM_CSR,
561 BRCMF_PCIE_CFGREG_MSI_CAP,
562 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
563 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
564 BRCMF_PCIE_CFGREG_MSI_DATA,
565 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
566 BRCMF_PCIE_CFGREG_RBAR_CTRL,
567 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
568 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
569 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
570 u32 i;
571 u32 val;
572 u32 lsc;
573
574 if (!devinfo->ci)
575 return;
576
577 /* Disable ASPM */
578 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
579 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
580 &lsc);
581 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
582 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
583 val);
584
585 /* Watchdog reset */
586 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
587 WRITECC32(devinfo, watchdog, 4);
588 msleep(100);
589
590 /* Restore ASPM */
591 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
592 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
593 lsc);
594
595 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
596 if (core->rev <= 13) {
597 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
598 brcmf_pcie_write_reg32(devinfo,
599 BRCMF_PCIE_PCIE2REG_CONFIGADDR,
600 cfg_offset[i]);
601 val = brcmf_pcie_read_reg32(devinfo,
602 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
603 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
604 cfg_offset[i], val);
605 brcmf_pcie_write_reg32(devinfo,
606 BRCMF_PCIE_PCIE2REG_CONFIGDATA,
607 val);
608 }
609 }
610 }
611
612
613 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
614 {
615 u32 config;
616
617 /* BAR1 window may not be sized properly */
618 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
619 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
620 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
621 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
622
623 device_wakeup_enable(&devinfo->pdev->dev);
624 }
625
626
627 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
628 {
629 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
630 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
631 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
632 5);
633 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
634 0);
635 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
636 7);
637 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
638 0);
639 }
640 return 0;
641 }
642
643
644 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
645 u32 resetintr)
646 {
647 struct brcmf_core *core;
648
649 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
650 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
651 brcmf_chip_resetcore(core, 0, 0, 0);
652 }
653
654 if (!brcmf_chip_set_active(devinfo->ci, resetintr))
655 return -EINVAL;
656 return 0;
657 }
658
659
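/* Post a host-to-device mailbox value in shared TCM and ring the SBMBX
 * doorbell. If a previous value is still pending, wait up to roughly a
 * second for the firmware to consume it before posting the new one.
 */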
660 static int
661 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
662 {
663 struct brcmf_pcie_shared_info *shared;
664 u32 addr;
665 u32 cur_htod_mb_data;
666 u32 i;
667
668 shared = &devinfo->shared;
669 addr = shared->htod_mb_data_addr;
670 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
671
672 if (cur_htod_mb_data != 0)
673 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
674 cur_htod_mb_data);
675
676 i = 0;
677 while (cur_htod_mb_data != 0) {
678 msleep(10);
679 i++;
680 if (i > 100)
681 return -EIO;
682 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
683 }
684
685 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
686 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
687 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
688
689 return 0;
690 }
691
692
693 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
694 {
695 struct brcmf_pcie_shared_info *shared;
696 u32 addr;
697 u32 dtoh_mb_data;
698
699 shared = &devinfo->shared;
700 addr = shared->dtoh_mb_data_addr;
701 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
702
703 if (!dtoh_mb_data)
704 return;
705
706 brcmf_pcie_write_tcm32(devinfo, addr, 0);
707
708 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
709 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
710 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
711 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
712 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
713 }
714 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
715 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
716 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
717 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
718 devinfo->mbdata_completed = true;
719 wake_up(&devinfo->mbdata_resp_wait);
720 }
721 if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
722 brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
723 brcmf_dev_coredump(&devinfo->pdev->dev);
724 }
725 }
726
727
728 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
729 {
730 struct brcmf_pcie_shared_info *shared;
731 struct brcmf_pcie_console *console;
732 u32 addr;
733
734 shared = &devinfo->shared;
735 console = &shared->console;
736 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
737 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
738
739 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
740 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
741 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
742 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
743
744 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
745 console->base_addr, console->buf_addr, console->bufsize);
746 }
747
748
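/* Drain the firmware console buffer in TCM and emit complete lines via
 * pr_debug(); only active when FWCON debugging is enabled.
 */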
749 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
750 {
751 struct brcmf_pcie_console *console;
752 u32 addr;
753 u8 ch;
754 u32 newidx;
755
756 if (!BRCMF_FWCON_ON())
757 return;
758
759 console = &devinfo->shared.console;
760 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
761 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
762 while (newidx != console->read_idx) {
763 addr = console->buf_addr + console->read_idx;
764 ch = brcmf_pcie_read_tcm8(devinfo, addr);
765 console->read_idx++;
766 if (console->read_idx == console->bufsize)
767 console->read_idx = 0;
768 if (ch == '\r')
769 continue;
770 console->log_str[console->log_idx] = ch;
771 console->log_idx++;
772 if ((ch != '\n') &&
773 (console->log_idx == (sizeof(console->log_str) - 2))) {
774 ch = '\n';
775 console->log_str[console->log_idx] = ch;
776 console->log_idx++;
777 }
778 if (ch == '\n') {
779 console->log_str[console->log_idx] = 0;
780 pr_debug("CONSOLE: %s", console->log_str);
781 console->log_idx = 0;
782 }
783 }
784 }
785
786
787 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
788 {
789 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
790 }
791
792
793 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
794 {
795 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
796 BRCMF_PCIE_MB_INT_D2H_DB |
797 BRCMF_PCIE_MB_INT_FN0_0 |
798 BRCMF_PCIE_MB_INT_FN0_1);
799 }
800
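/* Ring host-to-device doorbell 1 to signal that the host is ready, but
 * only when the firmware advertised BRCMF_PCIE_SHARED_HOSTRDY_DB1.
 */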
801 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
802 {
803 if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
804 brcmf_pcie_write_reg32(devinfo,
805 BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
806 }
807
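/* Interrupt handling uses a threaded IRQ: the quick-check handler only
 * tests MAILBOXINT and masks interrupts, while the thread handler acks
 * the status, handles mailbox data and doorbells, reads the firmware
 * console and re-enables interrupts.
 */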
808 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
809 {
810 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
811
812 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
813 brcmf_pcie_intr_disable(devinfo);
814 brcmf_dbg(PCIE, "Enter\n");
815 return IRQ_WAKE_THREAD;
816 }
817 return IRQ_NONE;
818 }
819
820
821 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
822 {
823 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
824 u32 status;
825
826 devinfo->in_irq = true;
827 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
828 brcmf_dbg(PCIE, "Enter %x\n", status);
829 if (status) {
830 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
831 status);
832 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
833 BRCMF_PCIE_MB_INT_FN0_1))
834 brcmf_pcie_handle_mb_data(devinfo);
835 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
836 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
837 brcmf_proto_msgbuf_rx_trigger(
838 &devinfo->pdev->dev);
839 }
840 }
841 brcmf_pcie_bus_console_read(devinfo);
842 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
843 brcmf_pcie_intr_enable(devinfo);
844 devinfo->in_irq = false;
845 return IRQ_HANDLED;
846 }
847
848
849 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
850 {
851 struct pci_dev *pdev;
852
853 pdev = devinfo->pdev;
854
855 brcmf_pcie_intr_disable(devinfo);
856
857 brcmf_dbg(PCIE, "Enter\n");
858
859 pci_enable_msi(pdev);
860 if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
861 brcmf_pcie_isr_thread, IRQF_SHARED,
862 "brcmf_pcie_intr", devinfo)) {
863 pci_disable_msi(pdev);
864 brcmf_err("Failed to request IRQ %d\n", pdev->irq);
865 return -EIO;
866 }
867 devinfo->irq_allocated = true;
868 return 0;
869 }
870
871
872 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
873 {
874 struct pci_dev *pdev;
875 u32 status;
876 u32 count;
877
878 if (!devinfo->irq_allocated)
879 return;
880
881 pdev = devinfo->pdev;
882
883 brcmf_pcie_intr_disable(devinfo);
884 free_irq(pdev->irq, devinfo);
885 pci_disable_msi(pdev);
886
887 msleep(50);
888 count = 0;
889 while ((devinfo->in_irq) && (count < 20)) {
890 msleep(50);
891 count++;
892 }
893 if (devinfo->in_irq)
894 brcmf_err("Still in IRQ (processing) !!!\n");
895
896 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
897 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
898
899 devinfo->irq_allocated = false;
900 }
901
902
903 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
904 {
905 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
906 struct brcmf_pciedev_info *devinfo = ring->devinfo;
907 struct brcmf_commonring *commonring = &ring->commonring;
908
909 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
910 return -EIO;
911
912 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
913 commonring->w_ptr, ring->id);
914
915 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
916
917 return 0;
918 }
919
920
921 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
922 {
923 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
924 struct brcmf_pciedev_info *devinfo = ring->devinfo;
925 struct brcmf_commonring *commonring = &ring->commonring;
926
927 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
928 return -EIO;
929
930 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
931 commonring->r_ptr, ring->id);
932
933 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
934
935 return 0;
936 }
937
938
939 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
940 {
941 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
942 struct brcmf_pciedev_info *devinfo = ring->devinfo;
943
944 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
945 return -EIO;
946
947 brcmf_dbg(PCIE, "RING !\n");
948 /* Any arbitrary value will do, let's use 1 */
949 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
950
951 return 0;
952 }
953
954
955 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
956 {
957 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
958 struct brcmf_pciedev_info *devinfo = ring->devinfo;
959 struct brcmf_commonring *commonring = &ring->commonring;
960
961 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
962 return -EIO;
963
964 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
965
966 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
967 commonring->w_ptr, ring->id);
968
969 return 0;
970 }
971
972
973 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
974 {
975 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
976 struct brcmf_pciedev_info *devinfo = ring->devinfo;
977 struct brcmf_commonring *commonring = &ring->commonring;
978
979 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
980 return -EIO;
981
982 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
983
984 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
985 commonring->r_ptr, ring->id);
986
987 return 0;
988 }
989
990
991 static void *
992 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
993 u32 size, u32 tcm_dma_phys_addr,
994 dma_addr_t *dma_handle)
995 {
996 void *ring;
997 u64 address;
998
999 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
1000 GFP_KERNEL);
1001 if (!ring)
1002 return NULL;
1003
1004 address = (u64)*dma_handle;
1005 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
1006 address & 0xffffffff);
1007 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
1008
1009 memset(ring, 0, size);
1010
1011 return (ring);
1012 }
1013
1014
1015 static struct brcmf_pcie_ringbuf *
1016 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
1017 u32 tcm_ring_phys_addr)
1018 {
1019 void *dma_buf;
1020 dma_addr_t dma_handle;
1021 struct brcmf_pcie_ringbuf *ring;
1022 u32 size;
1023 u32 addr;
1024 const u32 *ring_itemsize_array;
1025
1026 if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1027 ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1028 else
1029 ring_itemsize_array = brcmf_ring_itemsize;
1030
1031 size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1032 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1033 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1034 &dma_handle);
1035 if (!dma_buf)
1036 return NULL;
1037
1038 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1039 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1040 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1041 brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1042
1043 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1044 if (!ring) {
1045 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1046 dma_handle);
1047 return NULL;
1048 }
1049 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1050 ring_itemsize_array[ring_id], dma_buf);
1051 ring->dma_handle = dma_handle;
1052 ring->devinfo = devinfo;
1053 brcmf_commonring_register_cb(&ring->commonring,
1054 brcmf_pcie_ring_mb_ring_bell,
1055 brcmf_pcie_ring_mb_update_rptr,
1056 brcmf_pcie_ring_mb_update_wptr,
1057 brcmf_pcie_ring_mb_write_rptr,
1058 brcmf_pcie_ring_mb_write_wptr, ring);
1059
1060 return (ring);
1061 }
1062
1063
1064 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1065 struct brcmf_pcie_ringbuf *ring)
1066 {
1067 void *dma_buf;
1068 u32 size;
1069
1070 if (!ring)
1071 return;
1072
1073 dma_buf = ring->commonring.buf_addr;
1074 if (dma_buf) {
1075 size = ring->commonring.depth * ring->commonring.item_len;
1076 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1077 }
1078 kfree(ring);
1079 }
1080
1081
1082 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1083 {
1084 u32 i;
1085
1086 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1087 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1088 devinfo->shared.commonrings[i]);
1089 devinfo->shared.commonrings[i] = NULL;
1090 }
1091 kfree(devinfo->shared.flowrings);
1092 devinfo->shared.flowrings = NULL;
1093 if (devinfo->idxbuf) {
1094 dma_free_coherent(&devinfo->pdev->dev,
1095 devinfo->idxbuf_sz,
1096 devinfo->idxbuf,
1097 devinfo->idxbuf_dmahandle);
1098 devinfo->idxbuf = NULL;
1099 }
1100 }
1101
1102
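/* Set up the common message rings and flow-ring bookkeeping from the
 * ring info the firmware published in shared TCM. When the firmware
 * supports DMA indices, a host-coherent buffer is allocated for the
 * ring read/write indices; otherwise the indices remain in TCM.
 */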
1103 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1104 {
1105 struct brcmf_pcie_ringbuf *ring;
1106 struct brcmf_pcie_ringbuf *rings;
1107 u32 d2h_w_idx_ptr;
1108 u32 d2h_r_idx_ptr;
1109 u32 h2d_w_idx_ptr;
1110 u32 h2d_r_idx_ptr;
1111 u32 ring_mem_ptr;
1112 u32 i;
1113 u64 address;
1114 u32 bufsz;
1115 u8 idx_offset;
1116 struct brcmf_pcie_dhi_ringinfo ringinfo;
1117 u16 max_flowrings;
1118 u16 max_submissionrings;
1119 u16 max_completionrings;
1120
1121 memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1122 sizeof(ringinfo));
1123 if (devinfo->shared.version >= 6) {
1124 max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1125 max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1126 max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1127 } else {
1128 max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1129 max_flowrings = max_submissionrings -
1130 BRCMF_NROF_H2D_COMMON_MSGRINGS;
1131 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1132 }
1133
1134 if (devinfo->dma_idx_sz != 0) {
1135 bufsz = (max_submissionrings + max_completionrings) *
1136 devinfo->dma_idx_sz * 2;
1137 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1138 &devinfo->idxbuf_dmahandle,
1139 GFP_KERNEL);
1140 if (!devinfo->idxbuf)
1141 devinfo->dma_idx_sz = 0;
1142 }
1143
1144 if (devinfo->dma_idx_sz == 0) {
1145 d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1146 d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1147 h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1148 h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1149 idx_offset = sizeof(u32);
1150 devinfo->write_ptr = brcmf_pcie_write_tcm16;
1151 devinfo->read_ptr = brcmf_pcie_read_tcm16;
1152 brcmf_dbg(PCIE, "Using TCM indices\n");
1153 } else {
1154 memset(devinfo->idxbuf, 0, bufsz);
1155 devinfo->idxbuf_sz = bufsz;
1156 idx_offset = devinfo->dma_idx_sz;
1157 devinfo->write_ptr = brcmf_pcie_write_idx;
1158 devinfo->read_ptr = brcmf_pcie_read_idx;
1159
1160 h2d_w_idx_ptr = 0;
1161 address = (u64)devinfo->idxbuf_dmahandle;
1162 ringinfo.h2d_w_idx_hostaddr.low_addr =
1163 cpu_to_le32(address & 0xffffffff);
1164 ringinfo.h2d_w_idx_hostaddr.high_addr =
1165 cpu_to_le32(address >> 32);
1166
1167 h2d_r_idx_ptr = h2d_w_idx_ptr +
1168 max_submissionrings * idx_offset;
1169 address += max_submissionrings * idx_offset;
1170 ringinfo.h2d_r_idx_hostaddr.low_addr =
1171 cpu_to_le32(address & 0xffffffff);
1172 ringinfo.h2d_r_idx_hostaddr.high_addr =
1173 cpu_to_le32(address >> 32);
1174
1175 d2h_w_idx_ptr = h2d_r_idx_ptr +
1176 max_submissionrings * idx_offset;
1177 address += max_submissionrings * idx_offset;
1178 ringinfo.d2h_w_idx_hostaddr.low_addr =
1179 cpu_to_le32(address & 0xffffffff);
1180 ringinfo.d2h_w_idx_hostaddr.high_addr =
1181 cpu_to_le32(address >> 32);
1182
1183 d2h_r_idx_ptr = d2h_w_idx_ptr +
1184 max_completionrings * idx_offset;
1185 address += max_completionrings * idx_offset;
1186 ringinfo.d2h_r_idx_hostaddr.low_addr =
1187 cpu_to_le32(address & 0xffffffff);
1188 ringinfo.d2h_r_idx_hostaddr.high_addr =
1189 cpu_to_le32(address >> 32);
1190
1191 memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1192 &ringinfo, sizeof(ringinfo));
1193 brcmf_dbg(PCIE, "Using host memory indices\n");
1194 }
1195
1196 ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1197
1198 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1199 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1200 if (!ring)
1201 goto fail;
1202 ring->w_idx_addr = h2d_w_idx_ptr;
1203 ring->r_idx_addr = h2d_r_idx_ptr;
1204 ring->id = i;
1205 devinfo->shared.commonrings[i] = ring;
1206
1207 h2d_w_idx_ptr += idx_offset;
1208 h2d_r_idx_ptr += idx_offset;
1209 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1210 }
1211
1212 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1213 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1214 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1215 if (!ring)
1216 goto fail;
1217 ring->w_idx_addr = d2h_w_idx_ptr;
1218 ring->r_idx_addr = d2h_r_idx_ptr;
1219 ring->id = i;
1220 devinfo->shared.commonrings[i] = ring;
1221
1222 d2h_w_idx_ptr += idx_offset;
1223 d2h_r_idx_ptr += idx_offset;
1224 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1225 }
1226
1227 devinfo->shared.max_flowrings = max_flowrings;
1228 devinfo->shared.max_submissionrings = max_submissionrings;
1229 devinfo->shared.max_completionrings = max_completionrings;
1230 rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1231 if (!rings)
1232 goto fail;
1233
1234 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1235
1236 for (i = 0; i < max_flowrings; i++) {
1237 ring = &rings[i];
1238 ring->devinfo = devinfo;
1239 ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1240 brcmf_commonring_register_cb(&ring->commonring,
1241 brcmf_pcie_ring_mb_ring_bell,
1242 brcmf_pcie_ring_mb_update_rptr,
1243 brcmf_pcie_ring_mb_update_wptr,
1244 brcmf_pcie_ring_mb_write_rptr,
1245 brcmf_pcie_ring_mb_write_wptr,
1246 ring);
1247 ring->w_idx_addr = h2d_w_idx_ptr;
1248 ring->r_idx_addr = h2d_r_idx_ptr;
1249 h2d_w_idx_ptr += idx_offset;
1250 h2d_r_idx_ptr += idx_offset;
1251 }
1252 devinfo->shared.flowrings = rings;
1253
1254 return 0;
1255
1256 fail:
1257 brcmf_err("Allocating ring buffers failed\n");
1258 brcmf_pcie_release_ringbuffers(devinfo);
1259 return -ENOMEM;
1260 }
1261
1262
1263 static void
1264 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1265 {
1266 if (devinfo->shared.scratch)
1267 dma_free_coherent(&devinfo->pdev->dev,
1268 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1269 devinfo->shared.scratch,
1270 devinfo->shared.scratch_dmahandle);
1271 if (devinfo->shared.ringupd)
1272 dma_free_coherent(&devinfo->pdev->dev,
1273 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1274 devinfo->shared.ringupd,
1275 devinfo->shared.ringupd_dmahandle);
1276 }
1277
1278 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1279 {
1280 u64 address;
1281 u32 addr;
1282
1283 devinfo->shared.scratch =
1284 dma_alloc_coherent(&devinfo->pdev->dev,
1285 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1286 &devinfo->shared.scratch_dmahandle,
1287 GFP_KERNEL);
1288 if (!devinfo->shared.scratch)
1289 goto fail;
1290
1291 addr = devinfo->shared.tcm_base_address +
1292 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1293 address = (u64)devinfo->shared.scratch_dmahandle;
1294 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1295 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1296 addr = devinfo->shared.tcm_base_address +
1297 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1299
1300 devinfo->shared.ringupd =
1301 dma_alloc_coherent(&devinfo->pdev->dev,
1302 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1303 &devinfo->shared.ringupd_dmahandle,
1304 GFP_KERNEL);
1305 if (!devinfo->shared.ringupd)
1306 goto fail;
1307
1308 addr = devinfo->shared.tcm_base_address +
1309 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1310 address = (u64)devinfo->shared.ringupd_dmahandle;
1311 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1312 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1313 addr = devinfo->shared.tcm_base_address +
1314 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1315 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1316 return 0;
1317
1318 fail:
1319 brcmf_err("Allocating scratch buffers failed\n");
1320 brcmf_pcie_release_scratchbuffers(devinfo);
1321 return -ENOMEM;
1322 }
1323
1324
1325 static void brcmf_pcie_down(struct device *dev)
1326 {
1327 }
1328
1329
1330 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1331 {
1332 return 0;
1333 }
1334
1335
1336 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1337 uint len)
1338 {
1339 return 0;
1340 }
1341
1342
1343 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1344 uint len)
1345 {
1346 return 0;
1347 }
1348
1349
1350 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1351 {
1352 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1353 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1354 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1355
1356 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1357 devinfo->wowl_enabled = enabled;
1358 }
1359
1360
1361 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1362 {
1363 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1364 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1365 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1366
1367 return devinfo->ci->ramsize - devinfo->ci->srsize;
1368 }
1369
1370
1371 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1372 {
1373 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1374 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1375 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1376
1377 brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1378 brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1379 return 0;
1380 }
1381
1382 static
1383 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1384 {
1385 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1386 struct brcmf_fw_request *fwreq;
1387 struct brcmf_fw_name fwnames[] = {
1388 { ext, fw_name },
1389 };
1390
1391 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1392 brcmf_pcie_fwnames,
1393 ARRAY_SIZE(brcmf_pcie_fwnames),
1394 fwnames, ARRAY_SIZE(fwnames));
1395 if (!fwreq)
1396 return -ENOMEM;
1397
1398 kfree(fwreq);
1399 return 0;
1400 }
1401
1402 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1403 .txdata = brcmf_pcie_tx,
1404 .stop = brcmf_pcie_down,
1405 .txctl = brcmf_pcie_tx_ctlpkt,
1406 .rxctl = brcmf_pcie_rx_ctlpkt,
1407 .wowl_config = brcmf_pcie_wowl_config,
1408 .get_ramsize = brcmf_pcie_get_ramsize,
1409 .get_memdump = brcmf_pcie_get_memdump,
1410 .get_fwname = brcmf_pcie_get_fwname,
1411 };
1412
1413
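/* Look for the ramsize magic (BRCMF_RAMSIZE_MAGIC) at
 * BRCMF_RAMSIZE_OFFSET in the firmware image; if present, the 32-bit
 * word following it holds the RAM size the firmware expects and the
 * chip's ramsize is updated accordingly.
 */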
1414 static void
1415 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1416 u32 data_len)
1417 {
1418 __le32 *field;
1419 u32 newsize;
1420
1421 if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1422 return;
1423
1424 field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1425 if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1426 return;
1427 field++;
1428 newsize = le32_to_cpup(field);
1429
1430 brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1431 newsize);
1432 devinfo->ci->ramsize = newsize;
1433 }
1434
1435
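/* Parse the shared info structure the firmware placed in TCM: protocol
 * version, DMA-index support, rx buffer post limit, mailbox data and
 * ring info addresses, and the firmware console location.
 */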
1436 static int
1437 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1438 u32 sharedram_addr)
1439 {
1440 struct brcmf_pcie_shared_info *shared;
1441 u32 addr;
1442
1443 shared = &devinfo->shared;
1444 shared->tcm_base_address = sharedram_addr;
1445
1446 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1447 shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1448 brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1449 if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1450 (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1451 brcmf_err("Unsupported PCIE version %d\n", shared->version);
1452 return -EINVAL;
1453 }
1454
1455 /* check if firmware supports dma indices */
1456 if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1457 if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1458 devinfo->dma_idx_sz = sizeof(u16);
1459 else
1460 devinfo->dma_idx_sz = sizeof(u32);
1461 }
1462
1463 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1464 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1465 if (shared->max_rxbufpost == 0)
1466 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1467
1468 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1469 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1470
1471 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1472 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1473
1474 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1475 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1476
1477 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1478 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1479
1480 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1481 shared->max_rxbufpost, shared->rx_dataoffset);
1482
1483 brcmf_pcie_bus_console_init(devinfo);
1484
1485 return 0;
1486 }
1487
1488
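/* Firmware/NVRAM download sequence: halt the ARM core, copy the
 * firmware image to the device RAM base, clear the last word of RAM,
 * copy the NVRAM blob to the end of RAM, restart the ARM, then poll
 * that last word until the firmware overwrites it with the shared
 * area address.
 */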
1489 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1490 const struct firmware *fw, void *nvram,
1491 u32 nvram_len)
1492 {
1493 u32 sharedram_addr;
1494 u32 sharedram_addr_written;
1495 u32 loop_counter;
1496 int err;
1497 u32 address;
1498 u32 resetintr;
1499
1500 brcmf_dbg(PCIE, "Halt ARM.\n");
1501 err = brcmf_pcie_enter_download_state(devinfo);
1502 if (err)
1503 return err;
1504
1505 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1506 brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1507 (void *)fw->data, fw->size);
1508
1509 resetintr = get_unaligned_le32(fw->data);
1510 release_firmware(fw);
1511
1512 /* Clear the last 4 bytes of RAM; the firmware writes the shared
1513 * area address there once it is up and running.
1514 */
1515 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1516
1517 if (nvram) {
1518 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1519 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1520 nvram_len;
1521 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1522 brcmf_fw_nvram_free(nvram);
1523 } else {
1524 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1525 devinfo->nvram_name);
1526 }
1527
1528 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1529 devinfo->ci->ramsize -
1530 4);
1531 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1532 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1533 if (err)
1534 return err;
1535
1536 brcmf_dbg(PCIE, "Wait for FW init\n");
1537 sharedram_addr = sharedram_addr_written;
1538 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1539 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1540 msleep(50);
1541 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1542 devinfo->ci->ramsize -
1543 4);
1544 loop_counter--;
1545 }
1546 if (sharedram_addr == sharedram_addr_written) {
1547 brcmf_err("FW failed to initialize\n");
1548 return -ENODEV;
1549 }
1550 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1551
1552 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1553 }
1554
1555
1556 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1557 {
1558 struct pci_dev *pdev;
1559 int err;
1560 phys_addr_t bar0_addr, bar1_addr;
1561 ulong bar1_size;
1562
1563 pdev = devinfo->pdev;
1564
1565 err = pci_enable_device(pdev);
1566 if (err) {
1567 brcmf_err("pci_enable_device failed err=%d\n", err);
1568 return err;
1569 }
1570
1571 pci_set_master(pdev);
1572
1573 /* Bar-0 mapped address */
1574 bar0_addr = pci_resource_start(pdev, 0);
1575 /* Bar-1 mapped address */
1576 bar1_addr = pci_resource_start(pdev, 2);
1577 /* read Bar-1 mapped memory range */
1578 bar1_size = pci_resource_len(pdev, 2);
1579 if ((bar1_size == 0) || (bar1_addr == 0)) {
1580 brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1581 bar1_size, (unsigned long long)bar1_addr);
1582 return -EINVAL;
1583 }
1584
1585 devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1586 devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
1587
1588 if (!devinfo->regs || !devinfo->tcm) {
1589 brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
1590 devinfo->tcm);
1591 return -EINVAL;
1592 }
1593 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1594 devinfo->regs, (unsigned long long)bar0_addr);
1595 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1596 devinfo->tcm, (unsigned long long)bar1_addr,
1597 (unsigned int)bar1_size);
1598
1599 return 0;
1600 }
1601
1602
1603 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1604 {
1605 if (devinfo->tcm)
1606 iounmap(devinfo->tcm);
1607 if (devinfo->regs)
1608 iounmap(devinfo->regs);
1609
1610 pci_disable_device(devinfo->pdev);
1611 }
1612
1613
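/* Translate an absolute backplane address for access through the 4 KiB
 * BAR0 window: program the window base into PCI config space and
 * return the offset of the address within the window.
 */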
1614 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1615 {
1616 u32 ret_addr;
1617
1618 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1619 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1620 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1621
1622 return ret_addr;
1623 }
1624
1625
1626 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1627 {
1628 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1629
1630 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1631 return brcmf_pcie_read_reg32(devinfo, addr);
1632 }
1633
1634
1635 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1636 {
1637 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1638
1639 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1640 brcmf_pcie_write_reg32(devinfo, addr, value);
1641 }
1642
1643
1644 static int brcmf_pcie_buscoreprep(void *ctx)
1645 {
1646 return brcmf_pcie_get_resource(ctx);
1647 }
1648
1649
1650 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1651 {
1652 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1653 u32 val;
1654
1655 devinfo->ci = chip;
1656 brcmf_pcie_reset_device(devinfo);
1657
1658 val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1659 if (val != 0xffffffff)
1660 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1661 val);
1662
1663 return 0;
1664 }
1665
1666
1667 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1668 u32 rstvec)
1669 {
1670 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1671
1672 brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1673 }
1674
1675
1676 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1677 .prepare = brcmf_pcie_buscoreprep,
1678 .reset = brcmf_pcie_buscore_reset,
1679 .activate = brcmf_pcie_buscore_activate,
1680 .read32 = brcmf_pcie_buscore_read32,
1681 .write32 = brcmf_pcie_buscore_write32,
1682 };
1683
1684 #define BRCMF_PCIE_FW_CODE 0
1685 #define BRCMF_PCIE_FW_NVRAM 1
1686
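/* Completion callback of the asynchronous firmware request: download
 * firmware and NVRAM, set up rings, scratch buffers and interrupts,
 * hook everything into the bus structure and attach the core driver.
 */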
1687 static void brcmf_pcie_setup(struct device *dev, int ret,
1688 struct brcmf_fw_request *fwreq)
1689 {
1690 const struct firmware *fw;
1691 void *nvram;
1692 struct brcmf_bus *bus;
1693 struct brcmf_pciedev *pcie_bus_dev;
1694 struct brcmf_pciedev_info *devinfo;
1695 struct brcmf_commonring **flowrings;
1696 u32 i, nvram_len;
1697
1698 /* check firmware loading result */
1699 if (ret)
1700 goto fail;
1701
1702 bus = dev_get_drvdata(dev);
1703 pcie_bus_dev = bus->bus_priv.pcie;
1704 devinfo = pcie_bus_dev->devinfo;
1705 brcmf_pcie_attach(devinfo);
1706
1707 fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
1708 nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
1709 nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
1710 kfree(fwreq);
1711
1712 /* Some firmware images carry the device memory size inside the
1713 * image, because part of the device memory is shared and the split
1714 * is determined by the firmware. Parse the image and adjust the
1715 * chip memory size now.
1716 */
1717 brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);
1718
1719 ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
1720 if (ret)
1721 goto fail;
1722
1723 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1724
1725 ret = brcmf_pcie_init_ringbuffers(devinfo);
1726 if (ret)
1727 goto fail;
1728
1729 ret = brcmf_pcie_init_scratchbuffers(devinfo);
1730 if (ret)
1731 goto fail;
1732
1733 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1734 ret = brcmf_pcie_request_irq(devinfo);
1735 if (ret)
1736 goto fail;
1737
1738 /* hook the commonrings into the bus structure. */
1739 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
1740 bus->msgbuf->commonrings[i] =
1741 &devinfo->shared.commonrings[i]->commonring;
1742
1743 flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
1744 GFP_KERNEL);
1745 if (!flowrings)
1746 goto fail;
1747
1748 for (i = 0; i < devinfo->shared.max_flowrings; i++)
1749 flowrings[i] = &devinfo->shared.flowrings[i].commonring;
1750 bus->msgbuf->flowrings = flowrings;
1751
1752 bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
1753 bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
1754 bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;
1755
1756 init_waitqueue_head(&devinfo->mbdata_resp_wait);
1757
1758 brcmf_pcie_intr_enable(devinfo);
1759 brcmf_pcie_hostready(devinfo);
1760 if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
1761 return;
1762
1763 brcmf_pcie_bus_console_read(devinfo);
1764
1765 fail:
1766 device_release_driver(dev);
1767 }
1768
1769 static struct brcmf_fw_request *
1770 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
1771 {
1772 struct brcmf_fw_request *fwreq;
1773 struct brcmf_fw_name fwnames[] = {
1774 { ".bin", devinfo->fw_name },
1775 { ".txt", devinfo->nvram_name },
1776 };
1777
1778 fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
1779 brcmf_pcie_fwnames,
1780 ARRAY_SIZE(brcmf_pcie_fwnames),
1781 fwnames, ARRAY_SIZE(fwnames));
1782 if (!fwreq)
1783 return NULL;
1784
1785 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
1786 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
1787 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
1788 fwreq->board_type = devinfo->settings->board_type;
1789 /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
1790 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
1791 fwreq->bus_nr = devinfo->pdev->bus->number;
1792
1793 return fwreq;
1794 }
1795
1796 static int
1797 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1798 {
1799 int ret;
1800 struct brcmf_fw_request *fwreq;
1801 struct brcmf_pciedev_info *devinfo;
1802 struct brcmf_pciedev *pcie_bus_dev;
1803 struct brcmf_bus *bus;
1804
1805 brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
1806
1807 ret = -ENOMEM;
1808 devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
1809 if (devinfo == NULL)
1810 return ret;
1811
1812 devinfo->pdev = pdev;
1813 pcie_bus_dev = NULL;
1814 devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
1815 if (IS_ERR(devinfo->ci)) {
1816 ret = PTR_ERR(devinfo->ci);
1817 devinfo->ci = NULL;
1818 goto fail;
1819 }
1820
1821 pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
1822 if (pcie_bus_dev == NULL) {
1823 ret = -ENOMEM;
1824 goto fail;
1825 }
1826
1827 devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
1828 BRCMF_BUSTYPE_PCIE,
1829 devinfo->ci->chip,
1830 devinfo->ci->chiprev);
1831 if (!devinfo->settings) {
1832 ret = -ENOMEM;
1833 goto fail;
1834 }
1835
1836 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
1837 if (!bus) {
1838 ret = -ENOMEM;
1839 goto fail;
1840 }
1841 bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
1842 if (!bus->msgbuf) {
1843 ret = -ENOMEM;
1844 kfree(bus);
1845 goto fail;
1846 }
1847
1848 /* hook it all together. */
1849 pcie_bus_dev->devinfo = devinfo;
1850 pcie_bus_dev->bus = bus;
1851 bus->dev = &pdev->dev;
1852 bus->bus_priv.pcie = pcie_bus_dev;
1853 bus->ops = &brcmf_pcie_bus_ops;
1854 bus->proto_type = BRCMF_PROTO_MSGBUF;
1855 bus->chip = devinfo->coreid;
1856 bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
1857 dev_set_drvdata(&pdev->dev, bus);
1858
1859 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1860 if (!fwreq) {
1861 ret = -ENOMEM;
1862 goto fail_bus;
1863 }
1864
1865 ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
1866 if (ret < 0) {
1867 kfree(fwreq);
1868 goto fail_bus;
1869 }
1870 return 0;
1871
1872 fail_bus:
1873 kfree(bus->msgbuf);
1874 kfree(bus);
1875 fail:
1876 brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
1877 brcmf_pcie_release_resource(devinfo);
1878 if (devinfo->ci)
1879 brcmf_chip_detach(devinfo->ci);
1880 if (devinfo->settings)
1881 brcmf_release_module_param(devinfo->settings);
1882 kfree(pcie_bus_dev);
1883 kfree(devinfo);
1884 return ret;
1885 }
1886
1887
1888 static void
1889 brcmf_pcie_remove(struct pci_dev *pdev)
1890 {
1891 struct brcmf_pciedev_info *devinfo;
1892 struct brcmf_bus *bus;
1893
1894 brcmf_dbg(PCIE, "Enter\n");
1895
1896 bus = dev_get_drvdata(&pdev->dev);
1897 if (bus == NULL)
1898 return;
1899
1900 devinfo = bus->bus_priv.pcie->devinfo;
1901
1902 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1903 if (devinfo->ci)
1904 brcmf_pcie_intr_disable(devinfo);
1905
1906 brcmf_detach(&pdev->dev);
1907
1908 kfree(bus->bus_priv.pcie);
1909 kfree(bus->msgbuf->flowrings);
1910 kfree(bus->msgbuf);
1911 kfree(bus);
1912
1913 brcmf_pcie_release_irq(devinfo);
1914 brcmf_pcie_release_scratchbuffers(devinfo);
1915 brcmf_pcie_release_ringbuffers(devinfo);
1916 brcmf_pcie_reset_device(devinfo);
1917 brcmf_pcie_release_resource(devinfo);
1918
1919 if (devinfo->ci)
1920 brcmf_chip_detach(devinfo->ci);
1921 if (devinfo->settings)
1922 brcmf_release_module_param(devinfo->settings);
1923
1924 kfree(devinfo);
1925 dev_set_drvdata(&pdev->dev, NULL);
1926 }
1927
1928
1929 #ifdef CONFIG_PM
1930
1931
1932 static int brcmf_pcie_pm_enter_D3(struct device *dev)
1933 {
1934 struct brcmf_pciedev_info *devinfo;
1935 struct brcmf_bus *bus;
1936
1937 brcmf_dbg(PCIE, "Enter\n");
1938
1939 bus = dev_get_drvdata(dev);
1940 devinfo = bus->bus_priv.pcie->devinfo;
1941
1942 brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
1943
1944 devinfo->mbdata_completed = false;
1945 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
1946
1947 wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
1948 BRCMF_PCIE_MBDATA_TIMEOUT);
1949 if (!devinfo->mbdata_completed) {
1950 brcmf_err("Timeout on response for entering D3 substate\n");
1951 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
1952 return -EIO;
1953 }
1954
1955 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1956
1957 return 0;
1958 }
1959
1960
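/* Resume handler: if the device state survived suspend (interrupt mask
 * still programmed), wake it with D0_INFORM and continue; otherwise
 * detach the chip and go through a full remove/probe cycle.
 */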
1961 static int brcmf_pcie_pm_leave_D3(struct device *dev)
1962 {
1963 struct brcmf_pciedev_info *devinfo;
1964 struct brcmf_bus *bus;
1965 struct pci_dev *pdev;
1966 int err;
1967
1968 brcmf_dbg(PCIE, "Enter\n");
1969
1970 bus = dev_get_drvdata(dev);
1971 devinfo = bus->bus_priv.pcie->devinfo;
1972 brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
1973
1974 /* Check if device is still up and running, if so we are ready */
1975 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
1976 brcmf_dbg(PCIE, "Try to wakeup device....\n");
1977 if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
1978 goto cleanup;
1979 brcmf_dbg(PCIE, "Hot resume, continue....\n");
1980 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1981 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1982 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
1983 brcmf_pcie_intr_enable(devinfo);
1984 brcmf_pcie_hostready(devinfo);
1985 return 0;
1986 }
1987
1988 cleanup:
1989 brcmf_chip_detach(devinfo->ci);
1990 devinfo->ci = NULL;
1991 pdev = devinfo->pdev;
1992 brcmf_pcie_remove(pdev);
1993
1994 err = brcmf_pcie_probe(pdev, NULL);
1995 if (err)
1996 brcmf_err("probe after resume failed, err=%d\n", err);
1997
1998 return err;
1999 }
2000
2001
2002 static const struct dev_pm_ops brcmf_pciedrvr_pm = {
2003 .suspend = brcmf_pcie_pm_enter_D3,
2004 .resume = brcmf_pcie_pm_leave_D3,
2005 .freeze = brcmf_pcie_pm_enter_D3,
2006 .restore = brcmf_pcie_pm_leave_D3,
2007 };
2008
2009
2010 #endif /* CONFIG_PM */
2011
2012
2013 #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2014 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2015 #define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \
2016 BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2017 subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2018
2019 static const struct pci_device_id brcmf_pcie_devid_table[] = {
2020 BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
2021 BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
2022 BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
2023 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
2024 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
2025 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
2026 BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
2027 BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
2028 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
2029 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
2030 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
2031 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
2032 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
2033 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
2034 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
2035 BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
2036 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
2037 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
2038 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
2039 BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
2040 { /* end: all zeroes */ }
2041 };
2042
2043
2044 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
2045
2046
2047 static struct pci_driver brcmf_pciedrvr = {
2048 .node = {},
2049 .name = KBUILD_MODNAME,
2050 .id_table = brcmf_pcie_devid_table,
2051 .probe = brcmf_pcie_probe,
2052 .remove = brcmf_pcie_remove,
2053 #ifdef CONFIG_PM
2054 .driver.pm = &brcmf_pciedrvr_pm,
2055 #endif
2056 .driver.coredump = brcmf_dev_coredump,
2057 };
2058
2059
2060 void brcmf_pcie_register(void)
2061 {
2062 int err;
2063
2064 brcmf_dbg(PCIE, "Enter\n");
2065 err = pci_register_driver(&brcmf_pciedrvr);
2066 if (err)
2067 brcmf_err("PCIE driver registration failed, err=%d\n", err);
2068 }
2069
2070
2071 void brcmf_pcie_exit(void)
2072 {
2073 brcmf_dbg(PCIE, "Enter\n");
2074 pci_unregister_driver(&brcmf_pciedrvr);
2075 }