/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <defs.h>
#include <soc.h>
#include <chipcommon.h>
#include "aiutils.h"
#include "pub.h"
#include "nicpci.h"

/* SPROM offsets */
#define SRSH_ASPM_OFFSET		4	/* word 4 */
#define SRSH_ASPM_ENB			0x18	/* bits 3, 4 */
#define SRSH_ASPM_L1_ENB		0x10	/* bit 4 */
#define SRSH_ASPM_L0s_ENB		0x8	/* bit 3 */

#define SRSH_PCIE_MISC_CONFIG		5	/* word 5 */
#define SRSH_L23READY_EXIT_NOPERST	0x8000	/* bit 15 */
#define SRSH_CLKREQ_OFFSET_REV5		20	/* word 20 for srom rev <= 5 */
#define SRSH_CLKREQ_ENB			0x0800	/* bit 11 */
#define SRSH_BD_OFFSET			6	/* word 6 */

/* chipcontrol */
#define CHIPCTRL_4321_PLL_DOWN		0x800000 /* serdes PLL down override */

/* MDIO control */
#define MDIOCTL_DIVISOR_MASK		0x7f	/* clock divisor used on MDIO */
#define MDIOCTL_DIVISOR_VAL		0x2
#define MDIOCTL_PREAM_EN		0x80	/* Enable preamble sequence */
#define MDIOCTL_ACCESS_DONE		0x100	/* Transaction complete */

/* MDIO Data */
#define MDIODATA_MASK			0x0000ffff	/* data 2 bytes */
#define MDIODATA_TA			0x00020000	/* Turnaround */

#define MDIODATA_REGADDR_SHF		18		/* Regaddr shift */
#define MDIODATA_REGADDR_MASK		0x007c0000	/* Regaddr Mask */
#define MDIODATA_DEVADDR_SHF		23		/* Physmedia devaddr shift */
#define MDIODATA_DEVADDR_MASK		0x0f800000	/* Physmedia devaddr Mask */

/* MDIO Data for older core revisions (< 10) */
#define MDIODATA_REGADDR_SHF_OLD	18		/* Regaddr shift */
#define MDIODATA_REGADDR_MASK_OLD	0x003c0000	/* Regaddr Mask */
#define MDIODATA_DEVADDR_SHF_OLD	22		/* Physmedia devaddr shift */
#define MDIODATA_DEVADDR_MASK_OLD	0x0fc00000	/* Physmedia devaddr Mask */

/* Transaction flags */
#define MDIODATA_WRITE			0x10000000
#define MDIODATA_READ			0x20000000
#define MDIODATA_START			0x40000000

#define MDIODATA_DEV_ADDR		0x0	/* dev address for serdes */
#define MDIODATA_BLK_ADDR		0x1F	/* blk address for serdes */

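/*
 * An MDIO transaction is a single 32-bit write to the mdiodata register,
 * assembled from the fields above, e.g. for a read on an old (rev < 10)
 * serdes (illustrative only):
 *
 *	mdiodata = MDIODATA_START | MDIODATA_READ | MDIODATA_TA |
 *		   (physmedia << MDIODATA_DEVADDR_SHF_OLD) |
 *		   (regaddr << MDIODATA_REGADDR_SHF_OLD);
 *
 * pcie_mdioop() below builds exactly this word (write data goes into the low
 * 16 bits, MDIODATA_MASK) and then polls MDIOCTL_ACCESS_DONE in mdiocontrol
 * until the serdes completes the transaction.
 */
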
/* serdes regs (rev < 10) */
#define MDIODATA_DEV_PLL	0x1d	/* SERDES PLL Dev */
#define MDIODATA_DEV_TX		0x1e	/* SERDES TX Dev */
#define MDIODATA_DEV_RX		0x1f	/* SERDES RX Dev */

/* SERDES RX registers */
#define SERDES_RX_CTRL		1	/* Rx control */
#define SERDES_RX_TIMER1	2	/* Rx Timer1 */
#define SERDES_RX_CDR		6	/* CDR */
#define SERDES_RX_CDRBW		7	/* CDR BW */
/* SERDES RX control register */
#define SERDES_RX_CTRL_FORCE	0x80	/* rxpolarity_force */
#define SERDES_RX_CTRL_POLARITY	0x40	/* rxpolarity_value */

/* SERDES PLL registers */
#define SERDES_PLL_CTRL		1	/* PLL control reg */
#define PLL_CTRL_FREQDET_EN	0x4000	/* bit 14 is FREQDET on */

/* Linkcontrol reg offset in PCIE Cap */
#define PCIE_CAP_LINKCTRL_OFFSET	16	/* offset in pcie cap */
#define PCIE_CAP_LCREG_ASPML0s		0x01	/* ASPM L0s in linkctrl */
#define PCIE_CAP_LCREG_ASPML1		0x02	/* ASPM L1 in linkctrl */
#define PCIE_CLKREQ_ENAB		0x100	/* CLKREQ Enab in linkctrl */

#define PCIE_ASPM_ENAB		3	/* ASPM L0s & L1 in linkctrl */
#define PCIE_ASPM_L1_ENAB	2	/* ASPM L1 only in linkctrl */
#define PCIE_ASPM_L0s_ENAB	1	/* ASPM L0s only in linkctrl */
#define PCIE_ASPM_DISAB		0	/* ASPM off in linkctrl */

/* Power management threshold */
#define PCIE_L1THRESHOLDTIME_MASK	0xFF00	/* bits 8 - 15 */
#define PCIE_L1THRESHOLDTIME_SHIFT	8	/* L1 threshold time shift */
#define PCIE_L1THRESHOLD_WARVAL		0x72	/* WAR value */
#define PCIE_ASPMTIMER_EXTEND		0x01000000
					/* > rev7: enable extended ASPM timer */

/* different register spaces reachable through pcie indirect access */
#define PCIE_CONFIGREGS		1	/* Access to config space */
#define PCIE_PCIEREGS		2	/* Access to pcie registers */

/* PCIE protocol PHY diagnostic registers */
#define PCIE_PLP_STATUSREG	0x204	/* Status */

/* Status reg PCIE_PLP_STATUSREG */
#define PCIE_PLP_POLARITYINV_STAT	0x10

/* PCIE protocol DLLP diagnostic registers */
#define PCIE_DLLP_LCREG		0x100	/* Link Control */
#define PCIE_DLLP_PMTHRESHREG	0x128	/* Power Management Threshold */

/* PCIE protocol TLP diagnostic registers */
#define PCIE_TLP_WORKAROUNDSREG	0x004	/* TLP Workarounds */

/* Sonics to PCI translation types */
#define SBTOPCI_PREF		0x4	/* prefetch enable */
#define SBTOPCI_BURST		0x8	/* burst enable */
#define SBTOPCI_RC_READMULTI	0x20	/* memory read multiple */

#define PCI_CLKRUN_DSBL		0x8000	/* Bit 15 forceClkrun */

/* PCI core index in SROM shadow area */
#define SRSH_PI_OFFSET		0	/* first word */
#define SRSH_PI_MASK		0xf000	/* bits 15:12 */
#define SRSH_PI_SHIFT		12	/* bits 15:12 */

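/*
 * Note: the repeated "PAD" members in the register structs below are expected
 * to come from the PAD macro in defs.h, which expands to a unique
 * pad<line-number> field name; they only reserve address space for
 * unused/reserved hardware registers.
 */
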
/* Sonics side: PCI core and host control registers */
struct sbpciregs {
	u32 control;		/* PCI control */
	u32 PAD[3];
	u32 arbcontrol;		/* PCI arbiter control */
	u32 clkrun;		/* Clkrun Control (>=rev11) */
	u32 PAD[2];
	u32 intstatus;		/* Interrupt status */
	u32 intmask;		/* Interrupt mask */
	u32 sbtopcimailbox;	/* Sonics to PCI mailbox */
	u32 PAD[9];
	u32 bcastaddr;		/* Sonics broadcast address */
	u32 bcastdata;		/* Sonics broadcast data */
	u32 PAD[2];
	u32 gpioin;		/* ro: gpio input (>=rev2) */
	u32 gpioout;		/* rw: gpio output (>=rev2) */
	u32 gpioouten;		/* rw: gpio output enable (>= rev2) */
	u32 gpiocontrol;	/* rw: gpio control (>= rev2) */
	u32 PAD[36];
	u32 sbtopci0;		/* Sonics to PCI translation 0 */
	u32 sbtopci1;		/* Sonics to PCI translation 1 */
	u32 sbtopci2;		/* Sonics to PCI translation 2 */
	u32 PAD[189];
	u32 pcicfg[4][64];	/* 0x400 - 0x7FF, PCI Cfg Space (>=rev8) */
	u16 sprom[36];		/* SPROM shadow Area */
	u32 PAD[46];
};

/* SB side: PCIE core and host control registers */
struct sbpcieregs {
	u32 control;		/* host mode only */
	u32 PAD[2];
	u32 biststatus;		/* bist Status: 0x00C */
	u32 gpiosel;		/* PCIE gpio sel: 0x010 */
	u32 gpioouten;		/* PCIE gpio outen: 0x14 */
	u32 PAD[2];
	u32 intstatus;		/* Interrupt status: 0x20 */
	u32 intmask;		/* Interrupt mask: 0x24 */
	u32 sbtopcimailbox;	/* sb to pcie mailbox: 0x028 */
	u32 PAD[53];
	u32 sbtopcie0;		/* sb to pcie translation 0: 0x100 */
	u32 sbtopcie1;		/* sb to pcie translation 1: 0x104 */
	u32 sbtopcie2;		/* sb to pcie translation 2: 0x108 */
	u32 PAD[5];

	/* pcie core supports indirect access to config space */
	u32 configaddr;		/* pcie config space access: Address field: 0x120 */
	u32 configdata;		/* pcie config space access: Data field: 0x124 */

	/* mdio access to serdes */
	u32 mdiocontrol;	/* controls the mdio access: 0x128 */
	u32 mdiodata;		/* Data for the mdio access: 0x12c */

	/* pcie protocol phy/dllp/tlp register indirect access mechanism */
	u32 pcieindaddr;	/* indirect access to
				 * the internal register: 0x130
				 */
	u32 pcieinddata;	/* Data to/from the internal register: 0x134 */

	u32 clkreqenctrl;	/* >= rev 6, Clkreq rdma control: 0x138 */
	u32 PAD[177];
	u32 pciecfg[4][64];	/* 0x400 - 0x7FF, PCIE Cfg Space */
	u16 sprom[64];		/* SPROM shadow Area */
};

struct pcicore_info {
	union {
		struct sbpcieregs __iomem *pcieregs;
		struct sbpciregs __iomem *pciregs;
	} regs;			/* Memory-mapped register space of the core */

	struct si_pub *sih;	/* System interconnect handle */
	struct pci_dev *dev;
	u8 pciecap_lcreg_offset;	/* PCIE capability LCreg offset
					 * in the config space
					 */
	bool pcie_pr42767;
	u8 pcie_polarity;
	u8 pcie_war_aspm_ovr;	/* Override ASPM/Clkreq settings */

	u8 pmecap_offset;	/* PM Capability offset in the config space */
	bool pmecap;		/* Capable of generating PME */
};

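/*
 * PCIE_ASPM() is true only for PCIe core revisions 3 through 5; those are the
 * revisions for which the ASPM/CLKREQ workarounds below are applied.
 */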
#define PCIE_ASPM(sih)							\
	((ai_get_buscoretype(sih) == PCIE_CORE_ID) &&			\
	 ((ai_get_buscorerev(sih) >= 3) &&				\
	  (ai_get_buscorerev(sih) <= 5)))


/* delay needed between the mdio control/mdiodata register accesses */
static void pr28829_delay(void)
{
	udelay(10);
}

/* Initialize the PCI core.
 * It is the caller's responsibility to make sure that this is done only once.
 */
struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
				  void __iomem *regs)
{
	struct pcicore_info *pi;

	/* alloc struct pcicore_info */
	pi = kzalloc(sizeof(struct pcicore_info), GFP_ATOMIC);
	if (pi == NULL)
		return NULL;

	pi->sih = sih;
	pi->dev = pdev;

	if (ai_get_buscoretype(sih) == PCIE_CORE_ID) {
		u8 cap_ptr;
		pi->regs.pcieregs = regs;
		cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
						      NULL, NULL);
		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
	} else
		pi->regs.pciregs = regs;

	return pi;
}

void pcicore_deinit(struct pcicore_info *pch)
{
	kfree(pch);
}

/* return cap_offset if the requested capability exists in the PCI config space */
/* Note that it is the caller's responsibility to make sure it's a pci bus */
u8
pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
			    unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &byte_val);
	if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
		goto end;

	/* check if the capability pointer field exists */
	pci_read_config_byte(dev, PCI_STATUS, &byte_val);
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		goto end;

	pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
	/* check if the capability pointer is 0x00 */
	if (cap_ptr == 0x00)
		goto end;

	/* loop through the capability list
	 * and see if the requested capability exists
	 */
	pci_read_config_byte(dev, cap_ptr, &cap_id);

	while (cap_id != req_cap_id) {
		pci_read_config_byte(dev, cap_ptr + 1, &cap_ptr);
		if (cap_ptr == 0x00)
			break;
		pci_read_config_byte(dev, cap_ptr, &cap_id);
	}
	if (cap_id != req_cap_id)
		goto end;

	/* found the caller-requested capability */
	if (buf != NULL && buflen != NULL) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			goto end;
		*buflen = 0;
		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_SZPCR)
			bufsize = PCI_SZPCR - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			pci_read_config_byte(dev, cap_data, buf);
			cap_data++;
			buf++;
		}
	}
end:
	return cap_ptr;
}
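
/*
 * Example (illustrative only, mirroring pcicore_init() above): locate the
 * PCI Express capability and derive the link control register offset.
 *
 *	u8 cap_ptr = pcicore_find_pci_capability(pdev, PCI_CAP_ID_EXP,
 *						 NULL, NULL);
 *	if (cap_ptr)
 *		lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
 *
 * 'lcreg_offset' here is a placeholder for pi->pciecap_lcreg_offset.
 */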

/* ***** Register Access API */
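/*
 * Config space and the internal protocol registers are reached indirectly:
 * the offset is written to configaddr/pcieindaddr and the value is then read
 * from or written to configdata/pcieinddata. pcie_readreg() reads the address
 * register back once before touching the data register so that the posted
 * address write has taken effect.
 */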
static uint
pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
{
	uint retval = 0xFFFFFFFF;

	switch (addrtype) {
	case PCIE_CONFIGREGS:
		W_REG(&pcieregs->configaddr, offset);
		(void)R_REG(&pcieregs->configaddr);
		retval = R_REG(&pcieregs->configdata);
		break;
	case PCIE_PCIEREGS:
		W_REG(&pcieregs->pcieindaddr, offset);
		(void)R_REG(&pcieregs->pcieindaddr);
		retval = R_REG(&pcieregs->pcieinddata);
		break;
	}

	return retval;
}

static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
			  uint offset, uint val)
{
	switch (addrtype) {
	case PCIE_CONFIGREGS:
		W_REG(&pcieregs->configaddr, offset);
		W_REG(&pcieregs->configdata, val);
		break;
	case PCIE_PCIEREGS:
		W_REG(&pcieregs->pcieindaddr, offset);
		W_REG(&pcieregs->pcieinddata, val);
		break;
	default:
		break;
	}
	return 0;
}

static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
	uint mdiodata, i = 0;
	uint pcie_serdes_spinwait = 200;

	mdiodata = (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
		    (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
		    (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
		    (blk << 4));
	W_REG(&pcieregs->mdiodata, mdiodata);

	pr28829_delay();
	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
			break;

		udelay(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait)
		return false;

	return true;
}

static int
pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
	    uint *val)
{
	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
	uint mdiodata;
	uint i = 0;
	uint pcie_serdes_spinwait = 10;

	/* enable mdio access to SERDES */
	W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);

	if (ai_get_buscorerev(pi->sih) >= 10) {
		/* new serdes is slower in read/write,
		 * using two layers of reg address mapping
		 */
		if (!pcie_mdiosetblock(pi, physmedia))
			return 1;
		mdiodata = ((MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
			    (regaddr << MDIODATA_REGADDR_SHF));
		pcie_serdes_spinwait *= 20;
	} else {
		mdiodata = ((physmedia << MDIODATA_DEVADDR_SHF_OLD) |
			    (regaddr << MDIODATA_REGADDR_SHF_OLD));
	}

	if (!write)
		mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
	else
		mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
			     *val);

	W_REG(&pcieregs->mdiodata, mdiodata);

	pr28829_delay();

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
			if (!write) {
				pr28829_delay();
				*val = (R_REG(&pcieregs->mdiodata) &
					MDIODATA_MASK);
			}
			/* Disable mdio access to SERDES */
			W_REG(&pcieregs->mdiocontrol, 0);
			return 0;
		}
		udelay(1000);
		i++;
	}

	/* Timed out. Disable mdio access to SERDES. */
	W_REG(&pcieregs->mdiocontrol, 0);
	return 1;
}

/* use the mdio interface to read from mdio slaves */
static int
pcie_mdioread(struct pcicore_info *pi, uint physmedia, uint regaddr,
	      uint *regval)
{
	return pcie_mdioop(pi, physmedia, regaddr, false, regval);
}

/* use the mdio interface to write to mdio slaves */
static int
pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint regaddr, uint val)
{
	return pcie_mdioop(pi, physmedia, regaddr, true, &val);
}

/* ***** Support functions ***** */
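/*
 * pcie_clkreq: read-modify-write helper for the CLKREQ# enable bit in the
 * PCIe link control register. With a non-zero mask the bit is set or cleared
 * according to 'val'; with a zero mask the register is only read. Returns 1
 * if CLKREQ is enabled after the call, 0 otherwise (or if no PCIe capability
 * offset is known).
 */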
static u8 pcie_clkreq(struct pcicore_info *pi, u32 mask, u32 val)
{
	u32 reg_val;
	u8 offset;

	offset = pi->pciecap_lcreg_offset;
	if (!offset)
		return 0;

	pci_read_config_dword(pi->dev, offset, &reg_val);
	/* set operation */
	if (mask) {
		if (val)
			reg_val |= PCIE_CLKREQ_ENAB;
		else
			reg_val &= ~PCIE_CLKREQ_ENAB;
		pci_write_config_dword(pi->dev, offset, reg_val);
		pci_read_config_dword(pi->dev, offset, &reg_val);
	}
	if (reg_val & PCIE_CLKREQ_ENAB)
		return 1;
	else
		return 0;
}

static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
	u32 w;
	struct si_pub *sih = pi->sih;
	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;

	if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
	    ai_get_buscorerev(sih) < 7)
		return;

	w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
	if (extend)
		w |= PCIE_ASPMTIMER_EXTEND;
	else
		w &= ~PCIE_ASPMTIMER_EXTEND;
	pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
	w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}

/* centralized clkreq control policy */
static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
{
	struct si_pub *sih = pi->sih;

	switch (state) {
	case SI_DOATTACH:
		if (PCIE_ASPM(sih))
			pcie_clkreq(pi, 1, 0);
		break;
	case SI_PCIDOWN:
		/* turn on serdes PLL down */
		if (ai_get_buscorerev(sih) == 6) {
			ai_corereg(sih, SI_CC_IDX,
				   offsetof(struct chipcregs, chipcontrol_addr),
				   ~0, 0);
			ai_corereg(sih, SI_CC_IDX,
				   offsetof(struct chipcregs, chipcontrol_data),
				   ~0x40, 0);
		} else if (pi->pcie_pr42767) {
			pcie_clkreq(pi, 1, 1);
		}
		break;
	case SI_PCIUP:
		/* turn off serdes PLL down */
		if (ai_get_buscorerev(sih) == 6) {
			ai_corereg(sih, SI_CC_IDX,
				   offsetof(struct chipcregs, chipcontrol_addr),
				   ~0, 0);
			ai_corereg(sih, SI_CC_IDX,
				   offsetof(struct chipcregs, chipcontrol_data),
				   ~0x40, 0x40);
		} else if (PCIE_ASPM(sih)) {	/* disable clkreq */
			pcie_clkreq(pi, 1, 0);
		}
		break;
	}
}

/* ***** PCI core WARs ***** */
/* Done only once at attach time */
static void pcie_war_polarity(struct pcicore_info *pi)
{
	u32 w;

	if (pi->pcie_polarity != 0)
		return;

	w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);

	/* Detect the current polarity at attach, force that polarity and
	 * disable changing the polarity
	 */
	if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
		pi->pcie_polarity = SERDES_RX_CTRL_FORCE;
	else
		pi->pcie_polarity = (SERDES_RX_CTRL_FORCE |
				     SERDES_RX_CTRL_POLARITY);
}

/* enable ASPM and CLKREQ if the srom doesn't have it */
/* Needs to happen when an update to the shadow SROM is needed:
 * : Coming out of 'standby'/'hibernate'
 * : If pcie_war_aspm_ovr state changed
 */
static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
	struct si_pub *sih = pi->sih;
	u16 val16;
	u16 __iomem *reg16;
	u32 w;

	if (!PCIE_ASPM(sih))
		return;

	/* bypass this on QT or VSIM */
	reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
	val16 = R_REG(reg16);

	val16 &= ~SRSH_ASPM_ENB;
	if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
		val16 |= SRSH_ASPM_ENB;
	else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
		val16 |= SRSH_ASPM_L1_ENB;
	else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
		val16 |= SRSH_ASPM_L0s_ENB;

	W_REG(reg16, val16);

	pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
	w &= ~PCIE_ASPM_ENAB;
	w |= pi->pcie_war_aspm_ovr;
	pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);

	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
	val16 = R_REG(reg16);

	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = true;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;

	W_REG(reg16, val16);
}

/* Apply the polarity determined at the start */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_serdes(struct pcicore_info *pi)
{
	u32 w = 0;

	if (pi->pcie_polarity != 0)
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL,
			       pi->pcie_polarity);

	pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
	if (w & PLL_CTRL_FREQDET_EN) {
		w &= ~PLL_CTRL_FREQDET_EN;
		pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
	}
}

/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PERST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
	u16 val16;
	u16 __iomem *reg16;

	reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
	val16 = R_REG(reg16);

	if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
		val16 |= SRSH_L23READY_EXIT_NOPERST;
		W_REG(reg16, val16);
	}
}

/* quick hack for testing */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(struct pcicore_info *pi)
{
	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
	u16 __iomem *reg16;

	/* turn off serdes PLL down */
	ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
		   CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);

	/* clear srom shadow backdoor */
	reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
	W_REG(reg16, 0);
}

/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_pci_setup(struct pcicore_info *pi)
{
	struct si_pub *sih = pi->sih;
	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
	u32 w;

	if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
		w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
				 PCIE_TLP_WORKAROUNDSREG);
		w |= 0x8;
		pcie_writereg(pcieregs, PCIE_PCIEREGS,
			      PCIE_TLP_WORKAROUNDSREG, w);
	}

	if (ai_get_buscorerev(sih) == 1) {
		w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
		w |= 0x40;
		pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
	}

	if (ai_get_buscorerev(sih) == 0) {
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
	} else if (PCIE_ASPM(sih)) {
		/* Change the L1 threshold for better performance */
		w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
				 PCIE_DLLP_PMTHRESHREG);
		w &= ~PCIE_L1THRESHOLDTIME_MASK;
		w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
		pcie_writereg(pcieregs, PCIE_PCIEREGS,
			      PCIE_DLLP_PMTHRESHREG, w);

		pcie_war_serdes(pi);

		pcie_war_aspm_clkreq(pi);
	} else if (ai_get_buscorerev(pi->sih) == 7)
		pcie_war_noplldown(pi);

	/* Note that the fix is actually in the SROM,
	 * that's why this is open-ended
	 */
	if (ai_get_buscorerev(pi->sih) >= 6)
		pcie_misc_config_fixup(pi);
}

/* ***** Functions called during driver state changes ***** */
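/*
 * Expected ordering (illustrative, based on the function comments in this
 * file): pcicore_init() once at attach, then pcicore_attach(); pcicore_hwup()
 * and pcicore_up() when the device is (re)powered; pcicore_sleep() and
 * pcicore_down() on the way to D3/S3/S4; pcicore_deinit() at detach.
 */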
void pcicore_attach(struct pcicore_info *pi, int state)
{
	struct si_pub *sih = pi->sih;
	u32 bfl2 = (u32)getintvar(sih, BRCMS_SROM_BOARDFLAGS2);

	/* Determine if this board needs override */
	if (PCIE_ASPM(sih)) {
		if (bfl2 & BFL2_PCIEWAR_OVR)
			pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
		else
			pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
	}

	/* These need to happen in this order only */
	pcie_war_polarity(pi);

	pcie_war_serdes(pi);

	pcie_war_aspm_clkreq(pi);

	pcie_clkreq_upd(pi, state);
}

void pcicore_hwup(struct pcicore_info *pi)
{
	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
		return;

	pcie_war_pci_setup(pi);
}

void pcicore_up(struct pcicore_info *pi, int state)
{
	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
		return;

	/* Restore L1 timer for better performance */
	pcie_extendL1timer(pi, true);

	pcie_clkreq_upd(pi, state);
}

/* When the device is going to enter D3 state
 * (or the system is going to enter S3/S4 states)
 */
void pcicore_sleep(struct pcicore_info *pi)
{
	u32 w;

	if (!pi || !PCIE_ASPM(pi->sih))
		return;

	pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
	w &= ~PCIE_CAP_LCREG_ASPML1;
	pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);

	pi->pcie_pr42767 = false;
}

void pcicore_down(struct pcicore_info *pi, int state)
{
	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
		return;

	pcie_clkreq_upd(pi, state);

	/* Reduce L1 timer for better power savings */
	pcie_extendL1timer(pi, false);
}

/* precondition: current core is sii->buscoretype */
static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
{
	struct si_info *sii = (struct si_info *)(pi->sih);
	u16 val16;
	uint pciidx;

	pciidx = ai_coreidx(&sii->pub);
	val16 = R_REG(reg16);
	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
		val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
			(val16 & ~SRSH_PI_MASK);
		W_REG(reg16, val16);
	}
}

void
pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
{
	pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
}

void pcicore_fixcfg_pcie(struct pcicore_info *pi,
			 struct sbpcieregs __iomem *pcieregs)
{
	pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
}

/* precondition: current core is pci core */
void
pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
{
	u32 w;

	OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);

	if (ai_get_buscorerev(pi->sih) >= 11) {
		OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
		w = R_REG(&pciregs->clkrun);
		W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
		w = R_REG(&pciregs->clkrun);
	}
}