/*
 * Source: drivers/staging/brcm80211/util/aiutils.c
 * (mirror_ubuntu-bionic-kernel.git, retrieved via git.proxmox.com gitweb;
 * this copy is a lossy extraction — some original lines are missing)
 */
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
/*
 * True when running on a BCM47162 rev 0 chip and the currently selected
 * core is the MIPS 74K — that combination cannot access its DMP
 * (wrapper) registers.
 *
 * NOTE: unhygienic macro — it expands references to 'sih' and 'sii',
 * which must be in scope at every expansion site (matches usage in the
 * rest of this file).
 */
#define BCM47162_DMP() ((CHIPID(sih->chip) == BCM47162_CHIP_ID) && \
			(CHIPREV(sih->chiprev) == 0) && \
			(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
40 get_erom_ent(si_t
*sih
, u32
**eromptr
, u32 mask
, u32 match
)
43 uint inv
= 0, nom
= 0;
46 ent
= R_REG(si_osh(sih
), *eromptr
);
52 if ((ent
& ER_VALID
) == 0) {
57 if (ent
== (ER_END
| ER_VALID
))
60 if ((ent
& mask
) == match
)
66 SI_VMSG(("%s: Returning ent 0x%08x\n", __func__
, ent
));
68 SI_VMSG((" after %d invalid and %d non-matching entries\n",
75 get_asd(si_t
*sih
, u32
**eromptr
, uint sp
, uint ad
, uint st
,
76 u32
*addrl
, u32
*addrh
, u32
*sizel
, u32
*sizeh
)
80 asd
= get_erom_ent(sih
, eromptr
, ER_VALID
, ER_VALID
);
81 if (((asd
& ER_TAG1
) != ER_ADD
) ||
82 (((asd
& AD_SP_MASK
) >> AD_SP_SHIFT
) != sp
) ||
83 ((asd
& AD_ST_MASK
) != st
)) {
84 /* This is not what we want, "push" it back */
88 *addrl
= asd
& AD_ADDR_MASK
;
90 *addrh
= get_erom_ent(sih
, eromptr
, 0, 0);
94 sz
= asd
& AD_SZ_MASK
;
95 if (sz
== AD_SZ_SZD
) {
96 szd
= get_erom_ent(sih
, eromptr
, 0, 0);
97 *sizel
= szd
& SD_SZ_MASK
;
99 *sizeh
= get_erom_ent(sih
, eromptr
, 0, 0);
101 *sizel
= AD_SZ_BASE
<< (sz
>> AD_SZ_SHIFT
);
103 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
104 sp
, ad
, st
, *sizeh
, *sizel
, *addrh
, *addrl
));
109 static void ai_hwfixup(si_info_t
*sii
)
113 /* parse the enumeration rom to identify all cores */
114 void ai_scan(si_t
*sih
, void *regs
, uint devid
)
116 si_info_t
*sii
= SI_INFO(sih
);
117 chipcregs_t
*cc
= (chipcregs_t
*) regs
;
118 u32 erombase
, *eromptr
, *eromlim
;
120 erombase
= R_REG(sii
->osh
, &cc
->eromptr
);
122 switch (sih
->bustype
) {
124 eromptr
= (u32
*) REG_MAP(erombase
, SI_CORE_SIZE
);
128 /* Set wrappers address */
129 sii
->curwrap
= (void *)((unsigned long)regs
+ SI_CORE_SIZE
);
131 /* Now point the window at the erom */
132 pci_write_config_dword(sii
->osh
->pdev
, PCI_BAR0_WIN
, erombase
);
140 eromptr
= (u32
*)(unsigned long)erombase
;
144 SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
149 eromlim
= eromptr
+ (ER_REMAPCONTROL
/ sizeof(u32
));
151 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs
, erombase
, eromptr
, eromlim
));
152 while (eromptr
< eromlim
) {
153 u32 cia
, cib
, cid
, mfg
, crev
, nmw
, nsw
, nmp
, nsp
;
154 u32 mpd
, asd
, addrl
, addrh
, sizel
, sizeh
;
161 /* Grok a component */
162 cia
= get_erom_ent(sih
, &eromptr
, ER_TAG
, ER_CI
);
163 if (cia
== (ER_END
| ER_VALID
)) {
164 SI_VMSG(("Found END of erom after %d cores\n",
170 cib
= get_erom_ent(sih
, &eromptr
, 0, 0);
172 if ((cib
& ER_TAG
) != ER_CI
) {
173 SI_ERROR(("CIA not followed by CIB\n"));
177 cid
= (cia
& CIA_CID_MASK
) >> CIA_CID_SHIFT
;
178 mfg
= (cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
;
179 crev
= (cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
;
180 nmw
= (cib
& CIB_NMW_MASK
) >> CIB_NMW_SHIFT
;
181 nsw
= (cib
& CIB_NSW_MASK
) >> CIB_NSW_SHIFT
;
182 nmp
= (cib
& CIB_NMP_MASK
) >> CIB_NMP_SHIFT
;
183 nsp
= (cib
& CIB_NSP_MASK
) >> CIB_NSP_SHIFT
;
185 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg
, cid
, crev
, base
, nmw
, nsw
, nmp
, nsp
));
187 if (((mfg
== MFGID_ARM
) && (cid
== DEF_AI_COMP
)) || (nsp
== 0))
189 if ((nmw
+ nsw
== 0)) {
190 /* A component which is not a core */
191 if (cid
== OOB_ROUTER_CORE_ID
) {
192 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
,
193 &addrl
, &addrh
, &sizel
, &sizeh
);
195 sii
->oob_router
= addrl
;
202 /* sii->eromptr[idx] = base; */
205 sii
->coreid
[idx
] = cid
;
207 for (i
= 0; i
< nmp
; i
++) {
208 mpd
= get_erom_ent(sih
, &eromptr
, ER_VALID
, ER_VALID
);
209 if ((mpd
& ER_TAG
) != ER_MP
) {
210 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid
));
213 SI_VMSG((" Master port %d, mp: %d id: %d\n", i
,
214 (mpd
& MPD_MP_MASK
) >> MPD_MP_SHIFT
,
215 (mpd
& MPD_MUI_MASK
) >> MPD_MUI_SHIFT
));
218 /* First Slave Address Descriptor should be port 0:
219 * the main register space for the core
222 get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
, &addrl
, &addrh
,
225 /* Try again to see if it is a bridge */
227 get_asd(sih
, &eromptr
, 0, 0, AD_ST_BRIDGE
, &addrl
,
228 &addrh
, &sizel
, &sizeh
);
231 else if ((addrh
!= 0) || (sizeh
!= 0)
232 || (sizel
!= SI_CORE_SIZE
)) {
233 SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid
, asd
));
237 sii
->coresba
[idx
] = addrl
;
238 sii
->coresba_size
[idx
] = sizel
;
239 /* Get any more ASDs in port 0 */
243 get_asd(sih
, &eromptr
, 0, j
, AD_ST_SLAVE
, &addrl
,
244 &addrh
, &sizel
, &sizeh
);
245 if ((asd
!= 0) && (j
== 1) && (sizel
== SI_CORE_SIZE
)) {
246 sii
->coresba2
[idx
] = addrl
;
247 sii
->coresba2_size
[idx
] = sizel
;
252 /* Go through the ASDs for other slave ports */
253 for (i
= 1; i
< nsp
; i
++) {
257 get_asd(sih
, &eromptr
, i
, j
++, AD_ST_SLAVE
,
258 &addrl
, &addrh
, &sizel
, &sizeh
);
261 SI_ERROR((" SP %d has no address descriptors\n",
267 /* Now get master wrappers */
268 for (i
= 0; i
< nmw
; i
++) {
270 get_asd(sih
, &eromptr
, i
, 0, AD_ST_MWRAP
, &addrl
,
271 &addrh
, &sizel
, &sizeh
);
273 SI_ERROR(("Missing descriptor for MW %d\n", i
));
276 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
277 SI_ERROR(("Master wrapper %d is not 4KB\n", i
));
281 sii
->wrapba
[idx
] = addrl
;
284 /* And finally slave wrappers */
285 for (i
= 0; i
< nsw
; i
++) {
286 uint fwp
= (nsp
== 1) ? 0 : 1;
288 get_asd(sih
, &eromptr
, fwp
+ i
, 0, AD_ST_SWRAP
,
289 &addrl
, &addrh
, &sizel
, &sizeh
);
291 SI_ERROR(("Missing descriptor for SW %d\n", i
));
294 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
295 SI_ERROR(("Slave wrapper %d is not 4KB\n", i
));
298 if ((nmw
== 0) && (i
== 0))
299 sii
->wrapba
[idx
] = addrl
;
302 /* Don't record bridges */
310 SI_ERROR(("Reached end of erom without finding END"));
317 /* This function changes the logical "focus" to the indicated core.
318 * Return the current core's virtual address.
320 void *ai_setcoreidx(si_t
*sih
, uint coreidx
)
322 si_info_t
*sii
= SI_INFO(sih
);
323 u32 addr
= sii
->coresba
[coreidx
];
324 u32 wrap
= sii
->wrapba
[coreidx
];
327 if (coreidx
>= sii
->numcores
)
331 * If the user has provided an interrupt mask enabled function,
332 * then assert interrupts are disabled before switching the core.
334 ASSERT((sii
->intrsenabled_fn
== NULL
)
335 || !(*(sii
)->intrsenabled_fn
) ((sii
)->intr_arg
));
337 switch (sih
->bustype
) {
340 if (!sii
->regs
[coreidx
]) {
341 sii
->regs
[coreidx
] = REG_MAP(addr
, SI_CORE_SIZE
);
342 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
344 sii
->curmap
= regs
= sii
->regs
[coreidx
];
345 if (!sii
->wrappers
[coreidx
]) {
346 sii
->wrappers
[coreidx
] = REG_MAP(wrap
, SI_CORE_SIZE
);
347 ASSERT(GOODREGS(sii
->wrappers
[coreidx
]));
349 sii
->curwrap
= sii
->wrappers
[coreidx
];
353 /* point bar0 window */
354 pci_write_config_dword(sii
->osh
->pdev
, PCI_BAR0_WIN
, addr
);
356 /* point bar0 2nd 4KB window */
357 pci_write_config_dword(sii
->osh
->pdev
, PCI_BAR0_WIN2
, wrap
);
364 sii
->curmap
= regs
= (void *)(unsigned long)addr
;
365 sii
->curwrap
= (void *)(unsigned long)wrap
;
375 sii
->curidx
= coreidx
;
380 /* Return the number of address spaces in current core */
381 int ai_numaddrspaces(si_t
*sih
)
386 /* Return the address of the nth address space in the current core */
387 u32
ai_addrspace(si_t
*sih
, uint asidx
)
396 return sii
->coresba
[cidx
];
398 return sii
->coresba2
[cidx
];
400 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__
, asidx
));
405 /* Return the size of the nth address space in the current core */
406 u32
ai_addrspacesize(si_t
*sih
, uint asidx
)
415 return sii
->coresba_size
[cidx
];
417 return sii
->coresba2_size
[cidx
];
419 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__
, asidx
));
424 uint
ai_flag(si_t
*sih
)
430 if (BCM47162_DMP()) {
431 SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__
));
436 return R_REG(sii
->osh
, &ai
->oobselouta30
) & 0x1f;
439 void ai_setint(si_t
*sih
, int siflag
)
443 void ai_write_wrap_reg(si_t
*sih
, u32 offset
, u32 val
)
445 si_info_t
*sii
= SI_INFO(sih
);
446 u32
*w
= (u32
*) sii
->curwrap
;
447 W_REG(sii
->osh
, w
+ (offset
/ 4), val
);
451 uint
ai_corevendor(si_t
*sih
)
457 cia
= sii
->cia
[sii
->curidx
];
458 return (cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
;
461 uint
ai_corerev(si_t
*sih
)
467 cib
= sii
->cib
[sii
->curidx
];
468 return (cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
;
471 bool ai_iscoreup(si_t
*sih
)
479 return (((R_REG(sii
->osh
, &ai
->ioctrl
) & (SICF_FGC
| SICF_CLOCK_EN
)) ==
481 && ((R_REG(sii
->osh
, &ai
->resetctrl
) & AIRC_RESET
) == 0));
485 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
486 * switch back to the original core, and return the new value.
488 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
490 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
491 * and (on newer pci cores) chipcommon registers.
493 uint
ai_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
504 ASSERT(GOODIDX(coreidx
));
505 ASSERT(regoff
< SI_CORE_SIZE
);
506 ASSERT((val
& ~mask
) == 0);
508 if (coreidx
>= SI_MAXCORES
)
511 if (sih
->bustype
== SI_BUS
) {
512 /* If internal bus, we can always get at everything */
514 /* map if does not exist */
515 if (!sii
->regs
[coreidx
]) {
516 sii
->regs
[coreidx
] = REG_MAP(sii
->coresba
[coreidx
],
518 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
520 r
= (u32
*) ((unsigned char *) sii
->regs
[coreidx
] + regoff
);
521 } else if (sih
->bustype
== PCI_BUS
) {
522 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
524 if ((sii
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
525 /* Chipc registers are mapped at 12KB */
528 r
= (u32
*) ((char *)sii
->curmap
+
529 PCI_16KB0_CCREGS_OFFSET
+ regoff
);
530 } else if (sii
->pub
.buscoreidx
== coreidx
) {
531 /* pci registers are at either in the last 2KB of an 8KB window
532 * or, in pcie and pci rev 13 at 8KB
536 r
= (u32
*) ((char *)sii
->curmap
+
537 PCI_16KB0_PCIREGS_OFFSET
+
540 r
= (u32
*) ((char *)sii
->curmap
+
541 ((regoff
>= SBCONFIGOFF
) ?
542 PCI_BAR0_PCISBR_OFFSET
:
543 PCI_BAR0_PCIREGS_OFFSET
) +
549 INTR_OFF(sii
, intr_val
);
551 /* save current core index */
552 origidx
= si_coreidx(&sii
->pub
);
555 r
= (u32
*) ((unsigned char *) ai_setcoreidx(&sii
->pub
, coreidx
) +
562 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
563 W_REG(sii
->osh
, r
, w
);
567 w
= R_REG(sii
->osh
, r
);
570 /* restore core index */
571 if (origidx
!= coreidx
)
572 ai_setcoreidx(&sii
->pub
, origidx
);
574 INTR_RESTORE(sii
, intr_val
);
580 void ai_core_disable(si_t
*sih
, u32 bits
)
588 ASSERT(GOODREGS(sii
->curwrap
));
591 /* if core is already in reset, just return */
592 if (R_REG(sii
->osh
, &ai
->resetctrl
) & AIRC_RESET
)
595 W_REG(sii
->osh
, &ai
->ioctrl
, bits
);
596 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
599 W_REG(sii
->osh
, &ai
->resetctrl
, AIRC_RESET
);
603 /* reset and re-enable a core
605 * bits - core specific bits that are set during and after reset sequence
606 * resetbits - core specific bits that are set only during reset sequence
608 void ai_core_reset(si_t
*sih
, u32 bits
, u32 resetbits
)
615 ASSERT(GOODREGS(sii
->curwrap
));
619 * Must do the disable sequence first to work for arbitrary current core state.
621 ai_core_disable(sih
, (bits
| resetbits
));
624 * Now do the initialization sequence.
626 W_REG(sii
->osh
, &ai
->ioctrl
, (bits
| SICF_FGC
| SICF_CLOCK_EN
));
627 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
628 W_REG(sii
->osh
, &ai
->resetctrl
, 0);
631 W_REG(sii
->osh
, &ai
->ioctrl
, (bits
| SICF_CLOCK_EN
));
632 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
636 void ai_core_cflags_wo(si_t
*sih
, u32 mask
, u32 val
)
644 if (BCM47162_DMP()) {
645 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
650 ASSERT(GOODREGS(sii
->curwrap
));
653 ASSERT((val
& ~mask
) == 0);
656 w
= ((R_REG(sii
->osh
, &ai
->ioctrl
) & ~mask
) | val
);
657 W_REG(sii
->osh
, &ai
->ioctrl
, w
);
661 u32
ai_core_cflags(si_t
*sih
, u32 mask
, u32 val
)
668 if (BCM47162_DMP()) {
669 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
674 ASSERT(GOODREGS(sii
->curwrap
));
677 ASSERT((val
& ~mask
) == 0);
680 w
= ((R_REG(sii
->osh
, &ai
->ioctrl
) & ~mask
) | val
);
681 W_REG(sii
->osh
, &ai
->ioctrl
, w
);
684 return R_REG(sii
->osh
, &ai
->ioctrl
);
687 u32
ai_core_sflags(si_t
*sih
, u32 mask
, u32 val
)
694 if (BCM47162_DMP()) {
695 SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__
));
699 ASSERT(GOODREGS(sii
->curwrap
));
702 ASSERT((val
& ~mask
) == 0);
703 ASSERT((mask
& ~SISF_CORE_BITS
) == 0);
706 w
= ((R_REG(sii
->osh
, &ai
->iostatus
) & ~mask
) | val
);
707 W_REG(sii
->osh
, &ai
->iostatus
, w
);
710 return R_REG(sii
->osh
, &ai
->iostatus
);